/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_trace.h"
static DEFINE_MUTEX(binder_main_lock);
static DEFINE_MUTEX(binder_deferred_lock);
static DEFINE_MUTEX(binder_mmap_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static HLIST_HEAD(binder_deferred_list);
static HLIST_HEAD(binder_dead_nodes);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static int binder_last_id;
static struct workqueue_struct *binder_deferred_workqueue;
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
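/*
 * Usage sketch (assumed wiring, not shown in this fragment): the
 * binder_proc_fops generated by BINDER_DEBUG_ENTRY(proc) above would
 * typically be handed to debugfs during driver init, roughly:
 *
 *	debugfs_create_file("proc", S_IRUGO, binder_debugfs_dir_entry_root,
 *			    NULL, &binder_proc_fops);
 */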
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
};
static uint32_t binder_debug_mask;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
static bool binder_debug_no_lock;
module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, S_IRUGO);
static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;
static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
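/*
 * Runtime usage note (illustration only, not part of the original file):
 * module_param* entries appear under /sys/module/binder/parameters/, so the
 * writable knobs above can be changed on a running system, e.g.:
 *
 *	echo 0x3f > /sys/module/binder/parameters/debug_mask
 *	echo 1    > /sys/module/binder/parameters/stop_on_user_error
 */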
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)
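/*
 * Illustrative sketch (not from the original source): each wrapper above
 * uses container_of() to recover the enclosing object from its embedded
 * struct binder_object_header, e.g.:
 *
 *	struct binder_object_header *hdr = ...;
 *
 *	if (hdr->type == BINDER_TYPE_FD) {
 *		struct binder_fd_object *fp = to_binder_fd_object(hdr);
 *		... fp->fd now names the descriptor being passed ...
 *	}
 */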
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	int br[_IOC_NR(BR_FAILED_REPLY) + 1];
	int bc[_IOC_NR(BC_REPLY_SG) + 1];
	int obj_created[BINDER_STAT_COUNT];
	int obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;
static inline void binder_stats_deleted(enum binder_stat_types type)
{
	binder_stats.obj_deleted[type]++;
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	binder_stats.obj_created[type]++;
}
struct binder_transaction_log_entry {
	int debug_id;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	const char *context_name;
};

struct binder_transaction_log {
	int next;
	int full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;
static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;

	e = &log->entry[log->next];
	memset(e, 0, sizeof(*e));
	log->next++;
	if (log->next == ARRAY_SIZE(log->entry)) {
		log->next = 0;
		log->full = 1;
	}
	return e;
}
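/*
 * Usage illustration (mirrors binder_transaction() further down, not new
 * driver logic): callers grab a slot from the ring buffer and fill it in:
 *
 *	struct binder_transaction_log_entry *e;
 *
 *	e = binder_transaction_log_add(&binder_transaction_log);
 *	e->from_proc = proc->pid;
 *	e->from_thread = thread->pid;
 */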
struct binder_context {
	struct binder_node *binder_context_mgr_node;
	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};
struct binder_work {
	struct list_head entry;
	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};
struct binder_node {
	int debug_id;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	unsigned has_strong_ref:1;
	unsigned pending_strong_ref:1;
	unsigned has_weak_ref:1;
	unsigned pending_weak_ref:1;
	unsigned has_async_transaction:1;
	unsigned accept_fds:1;
	unsigned min_priority:8;
	struct list_head async_todo;
};
struct binder_ref_death {
	struct binder_work work;
	binder_uintptr_t cookie;
};
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	int debug_id;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	uint32_t desc;
	int strong;
	int weak;
	struct binder_ref_death *death;
};
struct binder_buffer {
	struct list_head entry; /* free and allocated entries by address */
	struct rb_node rb_node; /* free entry by size or allocated entry */
				/* by address */
	unsigned free:1;
	unsigned allow_user_free:1;
	unsigned async_transaction:1;
	unsigned debug_id:29;

	struct binder_transaction *transaction;

	struct binder_node *target_node;
	size_t data_size;
	size_t offsets_size;
	size_t extra_buffers_size;
	uint8_t data[0];
};
enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	int pid;
	struct vm_area_struct *vma;
	struct mm_struct *vma_vm_mm;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;
	void *buffer;
	ptrdiff_t user_buffer_offset;

	struct list_head buffers;
	struct rb_root free_buffers;
	struct rb_root allocated_buffers;
	size_t free_async_space;

	struct page **pages;
	size_t buffer_size;
	uint32_t buffer_free;
	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_context *context;
};
enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_NEED_RETURN = 0x20
};
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	int pid;
	int looper;
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	uint32_t return_error; /* Write failed, return error code in read buf */
	uint32_t return_error2; /* Write failed, return error code in read */
		/* buffer. Used when sending a reply to a dead process that */
		/* we are also waiting on */
	wait_queue_head_t wait;
	struct binder_stats stats;
};
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int	code;
	unsigned int	flags;
	long	priority;
	long	saved_priority;
	kuid_t	sender_euid;
};
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;
	int ret;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	preempt_enable_no_resched();
	ret = __alloc_fd(files, 0, rlim_cur, flags);
	preempt_disable();

	return ret;
}
/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files) {
		preempt_enable_no_resched();
		__fd_install(proc->files, fd, file);
		preempt_disable();
	}
}
/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}
static inline void binder_lock(const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&binder_main_lock);
	preempt_disable();
	trace_binder_locked(tag);
}

static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&binder_main_lock);
	preempt_enable();
}
static inline void *kzalloc_preempt_disabled(size_t size)
{
	void *ptr;

	ptr = kzalloc(size, GFP_NOWAIT);
	if (ptr)
		return ptr;

	preempt_enable_no_resched();
	ptr = kzalloc(size, GFP_KERNEL);
	preempt_disable();

	return ptr;
}
static inline long copy_to_user_preempt_disabled(void __user *to, const void *from, long n)
{
	long ret;

	preempt_enable_no_resched();
	ret = copy_to_user(to, from, n);
	preempt_disable();

	return ret;
}

static inline long copy_from_user_preempt_disabled(void *to, const void __user *from, long n)
{
	long ret;

	preempt_enable_no_resched();
	ret = copy_from_user(to, from, n);
	preempt_disable();

	return ret;
}
#define get_user_preempt_disabled(x, ptr)	\
({						\
	long __ret;				\
	preempt_enable_no_resched();		\
	__ret = get_user(x, ptr);		\
	preempt_disable();			\
	__ret;					\
})

#define put_user_preempt_disabled(x, ptr)	\
({						\
	long __ret;				\
	preempt_enable_no_resched();		\
	__ret = put_user(x, ptr);		\
	preempt_disable();			\
	__ret;					\
})
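/*
 * Usage sketch (assumed, matching the pattern of the wrappers above): the
 * command read/write loops use these helpers wherever a user-space access
 * may fault while binder_main_lock is held with preemption disabled, e.g.:
 *
 *	uint32_t cmd;
 *
 *	if (get_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
 *		return -EFAULT;
 */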
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur;
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		      current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
static size_t binder_buffer_size(struct binder_proc *proc,
				 struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &proc->buffers))
		return proc->buffer + proc->buffer_size - (void *)buffer->data;
	return (size_t)list_entry(buffer->entry.next,
			  struct binder_buffer, entry) - (size_t)buffer->data;
}
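/*
 * Layout note (illustration, not new driver logic): binder_buffer headers
 * live inline in the mmap'ed area immediately ahead of their payload, so a
 * buffer's usable size is the distance from its data[] to the next buffer
 * header (or to the end of the area for the last buffer):
 *
 *	| hdr A | A data ...... | hdr B | B data ... |    end of area
 *	          ^ size(A) == (char *)B - (char *)A->data
 */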
static void binder_insert_free_buffer(struct binder_proc *proc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_buffer_size(proc, new_buffer);

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: add free buffer, size %zd, at %pK\n",
		      proc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_buffer_size(proc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
}
static void binder_insert_allocated_buffer(struct binder_proc *proc,
					   struct binder_buffer *new_buffer)
{
	struct rb_node **p = &proc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer < buffer)
			p = &parent->rb_left;
		else if (new_buffer > buffer)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
}
615 static struct binder_buffer
*binder_buffer_lookup(struct binder_proc
*proc
,
618 struct rb_node
*n
= proc
->allocated_buffers
.rb_node
;
619 struct binder_buffer
*buffer
;
620 struct binder_buffer
*kern_ptr
;
622 kern_ptr
= (struct binder_buffer
*)(user_ptr
- proc
->user_buffer_offset
623 - offsetof(struct binder_buffer
, data
));
626 buffer
= rb_entry(n
, struct binder_buffer
, rb_node
);
627 BUG_ON(buffer
->free
);
629 if (kern_ptr
< buffer
)
631 else if (kern_ptr
> buffer
)
639 static int binder_update_page_range(struct binder_proc
*proc
, int allocate
,
640 void *start
, void *end
,
641 struct vm_area_struct
*vma
)
644 unsigned long user_page_addr
;
645 struct vm_struct tmp_area
;
647 struct mm_struct
*mm
;
649 binder_debug(BINDER_DEBUG_BUFFER_ALLOC
,
650 "%d: %s pages %pK-%pK\n", proc
->pid
,
651 allocate
? "allocate" : "free", start
, end
);
656 trace_binder_update_page_range(proc
, allocate
, start
, end
);
661 mm
= get_task_mm(proc
->tsk
);
663 preempt_enable_no_resched();
666 down_write(&mm
->mmap_sem
);
668 if (vma
&& mm
!= proc
->vma_vm_mm
) {
669 pr_err("%d: vma mm and task mm mismatch\n",
679 pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
684 for (page_addr
= start
; page_addr
< end
; page_addr
+= PAGE_SIZE
) {
687 page
= &proc
->pages
[(page_addr
- proc
->buffer
) / PAGE_SIZE
];
690 *page
= alloc_page(GFP_KERNEL
| __GFP_HIGHMEM
| __GFP_ZERO
);
692 pr_err("%d: binder_alloc_buf failed for page at %pK\n",
693 proc
->pid
, page_addr
);
694 goto err_alloc_page_failed
;
696 tmp_area
.addr
= page_addr
;
697 tmp_area
.size
= PAGE_SIZE
+ PAGE_SIZE
/* guard page? */;
698 ret
= map_vm_area(&tmp_area
, PAGE_KERNEL
, page
);
700 pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
701 proc
->pid
, page_addr
);
702 goto err_map_kernel_failed
;
705 (uintptr_t)page_addr
+ proc
->user_buffer_offset
;
706 ret
= vm_insert_page(vma
, user_page_addr
, page
[0]);
708 pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
709 proc
->pid
, user_page_addr
);
710 goto err_vm_insert_page_failed
;
712 /* vm_insert_page does not seem to increment the refcount */
715 up_write(&mm
->mmap_sem
);
724 for (page_addr
= end
- PAGE_SIZE
; page_addr
>= start
;
725 page_addr
-= PAGE_SIZE
) {
726 page
= &proc
->pages
[(page_addr
- proc
->buffer
) / PAGE_SIZE
];
728 zap_page_range(vma
, (uintptr_t)page_addr
+
729 proc
->user_buffer_offset
, PAGE_SIZE
, NULL
);
730 err_vm_insert_page_failed
:
731 unmap_kernel_range((unsigned long)page_addr
, PAGE_SIZE
);
732 err_map_kernel_failed
:
735 err_alloc_page_failed
:
740 up_write(&mm
->mmap_sem
);
749 static struct binder_buffer
*binder_alloc_buf(struct binder_proc
*proc
,
752 size_t extra_buffers_size
,
755 struct rb_node
*n
= proc
->free_buffers
.rb_node
;
756 struct binder_buffer
*buffer
;
758 struct rb_node
*best_fit
= NULL
;
761 size_t size
, data_offsets_size
;
763 if (proc
->vma
== NULL
) {
764 pr_err("%d: binder_alloc_buf, no vma\n",
769 data_offsets_size
= ALIGN(data_size
, sizeof(void *)) +
770 ALIGN(offsets_size
, sizeof(void *));
772 if (data_offsets_size
< data_size
|| data_offsets_size
< offsets_size
) {
773 binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
774 proc
->pid
, data_size
, offsets_size
);
777 size
= data_offsets_size
+ ALIGN(extra_buffers_size
, sizeof(void *));
778 if (size
< data_offsets_size
|| size
< extra_buffers_size
) {
779 binder_user_error("%d: got transaction with invalid extra_buffers_size %zd\n",
780 proc
->pid
, extra_buffers_size
);
784 proc
->free_async_space
< size
+ sizeof(struct binder_buffer
)) {
785 binder_debug(BINDER_DEBUG_BUFFER_ALLOC
,
786 "%d: binder_alloc_buf size %zd failed, no async space left\n",
792 buffer
= rb_entry(n
, struct binder_buffer
, rb_node
);
793 BUG_ON(!buffer
->free
);
794 buffer_size
= binder_buffer_size(proc
, buffer
);
796 if (size
< buffer_size
) {
799 } else if (size
> buffer_size
)
806 if (best_fit
== NULL
) {
807 pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
812 buffer
= rb_entry(best_fit
, struct binder_buffer
, rb_node
);
813 buffer_size
= binder_buffer_size(proc
, buffer
);
816 binder_debug(BINDER_DEBUG_BUFFER_ALLOC
,
817 "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
818 proc
->pid
, size
, buffer
, buffer_size
);
821 (void *)(((uintptr_t)buffer
->data
+ buffer_size
) & PAGE_MASK
);
823 if (size
+ sizeof(struct binder_buffer
) + 4 >= buffer_size
)
824 buffer_size
= size
; /* no room for other buffers */
826 buffer_size
= size
+ sizeof(struct binder_buffer
);
829 (void *)PAGE_ALIGN((uintptr_t)buffer
->data
+ buffer_size
);
830 if (end_page_addr
> has_page_addr
)
831 end_page_addr
= has_page_addr
;
832 if (binder_update_page_range(proc
, 1,
833 (void *)PAGE_ALIGN((uintptr_t)buffer
->data
), end_page_addr
, NULL
))
836 rb_erase(best_fit
, &proc
->free_buffers
);
838 binder_insert_allocated_buffer(proc
, buffer
);
839 if (buffer_size
!= size
) {
840 struct binder_buffer
*new_buffer
= (void *)buffer
->data
+ size
;
842 list_add(&new_buffer
->entry
, &buffer
->entry
);
843 new_buffer
->free
= 1;
844 binder_insert_free_buffer(proc
, new_buffer
);
846 binder_debug(BINDER_DEBUG_BUFFER_ALLOC
,
847 "%d: binder_alloc_buf size %zd got %pK\n",
848 proc
->pid
, size
, buffer
);
849 buffer
->data_size
= data_size
;
850 buffer
->offsets_size
= offsets_size
;
851 buffer
->extra_buffers_size
= extra_buffers_size
;
852 buffer
->async_transaction
= is_async
;
854 proc
->free_async_space
-= size
+ sizeof(struct binder_buffer
);
855 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC
,
856 "%d: binder_alloc_buf size %zd async free %zd\n",
857 proc
->pid
, size
, proc
->free_async_space
);
static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer & PAGE_MASK);
}

static void *buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
}
873 static void binder_delete_free_buffer(struct binder_proc
*proc
,
874 struct binder_buffer
*buffer
)
876 struct binder_buffer
*prev
, *next
= NULL
;
877 int free_page_end
= 1;
878 int free_page_start
= 1;
880 BUG_ON(proc
->buffers
.next
== &buffer
->entry
);
881 prev
= list_entry(buffer
->entry
.prev
, struct binder_buffer
, entry
);
883 if (buffer_end_page(prev
) == buffer_start_page(buffer
)) {
885 if (buffer_end_page(prev
) == buffer_end_page(buffer
))
887 binder_debug(BINDER_DEBUG_BUFFER_ALLOC
,
888 "%d: merge free, buffer %pK share page with %pK\n",
889 proc
->pid
, buffer
, prev
);
892 if (!list_is_last(&buffer
->entry
, &proc
->buffers
)) {
893 next
= list_entry(buffer
->entry
.next
,
894 struct binder_buffer
, entry
);
895 if (buffer_start_page(next
) == buffer_end_page(buffer
)) {
897 if (buffer_start_page(next
) ==
898 buffer_start_page(buffer
))
900 binder_debug(BINDER_DEBUG_BUFFER_ALLOC
,
901 "%d: merge free, buffer %pK share page with %pK\n",
902 proc
->pid
, buffer
, prev
);
905 list_del(&buffer
->entry
);
906 if (free_page_start
|| free_page_end
) {
907 binder_debug(BINDER_DEBUG_BUFFER_ALLOC
,
908 "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
909 proc
->pid
, buffer
, free_page_start
? "" : " end",
910 free_page_end
? "" : " start", prev
, next
);
911 binder_update_page_range(proc
, 0, free_page_start
?
912 buffer_start_page(buffer
) : buffer_end_page(buffer
),
913 (free_page_end
? buffer_end_page(buffer
) :
914 buffer_start_page(buffer
)) + PAGE_SIZE
, NULL
);
918 static void binder_free_buf(struct binder_proc
*proc
,
919 struct binder_buffer
*buffer
)
921 size_t size
, buffer_size
;
923 buffer_size
= binder_buffer_size(proc
, buffer
);
925 size
= ALIGN(buffer
->data_size
, sizeof(void *)) +
926 ALIGN(buffer
->offsets_size
, sizeof(void *)) +
927 ALIGN(buffer
->extra_buffers_size
, sizeof(void *));
929 binder_debug(BINDER_DEBUG_BUFFER_ALLOC
,
930 "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
931 proc
->pid
, buffer
, size
, buffer_size
);
933 BUG_ON(buffer
->free
);
934 BUG_ON(size
> buffer_size
);
935 BUG_ON(buffer
->transaction
!= NULL
);
936 BUG_ON((void *)buffer
< proc
->buffer
);
937 BUG_ON((void *)buffer
> proc
->buffer
+ proc
->buffer_size
);
939 if (buffer
->async_transaction
) {
940 proc
->free_async_space
+= size
+ sizeof(struct binder_buffer
);
942 binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC
,
943 "%d: binder_free_buf size %zd async free %zd\n",
944 proc
->pid
, size
, proc
->free_async_space
);
947 binder_update_page_range(proc
, 0,
948 (void *)PAGE_ALIGN((uintptr_t)buffer
->data
),
949 (void *)(((uintptr_t)buffer
->data
+ buffer_size
) & PAGE_MASK
),
951 rb_erase(&buffer
->rb_node
, &proc
->allocated_buffers
);
953 if (!list_is_last(&buffer
->entry
, &proc
->buffers
)) {
954 struct binder_buffer
*next
= list_entry(buffer
->entry
.next
,
955 struct binder_buffer
, entry
);
958 rb_erase(&next
->rb_node
, &proc
->free_buffers
);
959 binder_delete_free_buffer(proc
, next
);
962 if (proc
->buffers
.next
!= &buffer
->entry
) {
963 struct binder_buffer
*prev
= list_entry(buffer
->entry
.prev
,
964 struct binder_buffer
, entry
);
967 binder_delete_free_buffer(proc
, buffer
);
968 rb_erase(&prev
->rb_node
, &proc
->free_buffers
);
972 binder_insert_free_buffer(proc
, buffer
);
975 static struct binder_node
*binder_get_node(struct binder_proc
*proc
,
976 binder_uintptr_t ptr
)
978 struct rb_node
*n
= proc
->nodes
.rb_node
;
979 struct binder_node
*node
;
982 node
= rb_entry(n
, struct binder_node
, rb_node
);
986 else if (ptr
> node
->ptr
)
994 static struct binder_node
*binder_new_node(struct binder_proc
*proc
,
995 binder_uintptr_t ptr
,
996 binder_uintptr_t cookie
)
998 struct rb_node
**p
= &proc
->nodes
.rb_node
;
999 struct rb_node
*parent
= NULL
;
1000 struct binder_node
*node
;
1004 node
= rb_entry(parent
, struct binder_node
, rb_node
);
1006 if (ptr
< node
->ptr
)
1008 else if (ptr
> node
->ptr
)
1009 p
= &(*p
)->rb_right
;
1014 node
= kzalloc_preempt_disabled(sizeof(*node
));
1017 binder_stats_created(BINDER_STAT_NODE
);
1018 rb_link_node(&node
->rb_node
, parent
, p
);
1019 rb_insert_color(&node
->rb_node
, &proc
->nodes
);
1020 node
->debug_id
= ++binder_last_id
;
1023 node
->cookie
= cookie
;
1024 node
->work
.type
= BINDER_WORK_NODE
;
1025 INIT_LIST_HEAD(&node
->work
.entry
);
1026 INIT_LIST_HEAD(&node
->async_todo
);
1027 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
1028 "%d:%d node %d u%016llx c%016llx created\n",
1029 proc
->pid
, current
->pid
, node
->debug_id
,
1030 (u64
)node
->ptr
, (u64
)node
->cookie
);
1034 static int binder_inc_node(struct binder_node
*node
, int strong
, int internal
,
1035 struct list_head
*target_list
)
1039 if (target_list
== NULL
&&
1040 node
->internal_strong_refs
== 0 &&
1042 node
== node
->proc
->context
->
1043 binder_context_mgr_node
&&
1044 node
->has_strong_ref
)) {
1045 pr_err("invalid inc strong node for %d\n",
1049 node
->internal_strong_refs
++;
1051 node
->local_strong_refs
++;
1052 if (!node
->has_strong_ref
&& target_list
) {
1053 list_del_init(&node
->work
.entry
);
1054 list_add_tail(&node
->work
.entry
, target_list
);
1058 node
->local_weak_refs
++;
1059 if (!node
->has_weak_ref
&& list_empty(&node
->work
.entry
)) {
1060 if (target_list
== NULL
) {
1061 pr_err("invalid inc weak node for %d\n",
1065 list_add_tail(&node
->work
.entry
, target_list
);
1071 static int binder_dec_node(struct binder_node
*node
, int strong
, int internal
)
1075 node
->internal_strong_refs
--;
1077 node
->local_strong_refs
--;
1078 if (node
->local_strong_refs
|| node
->internal_strong_refs
)
1082 node
->local_weak_refs
--;
1083 if (node
->local_weak_refs
|| !hlist_empty(&node
->refs
))
1086 if (node
->proc
&& (node
->has_strong_ref
|| node
->has_weak_ref
)) {
1087 if (list_empty(&node
->work
.entry
)) {
1088 list_add_tail(&node
->work
.entry
, &node
->proc
->todo
);
1089 wake_up_interruptible(&node
->proc
->wait
);
1092 if (hlist_empty(&node
->refs
) && !node
->local_strong_refs
&&
1093 !node
->local_weak_refs
) {
1094 list_del_init(&node
->work
.entry
);
1096 rb_erase(&node
->rb_node
, &node
->proc
->nodes
);
1097 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
1098 "refless node %d deleted\n",
1101 hlist_del(&node
->dead_node
);
1102 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
1103 "dead node %d deleted\n",
1107 binder_stats_deleted(BINDER_STAT_NODE
);
1115 static struct binder_ref
*binder_get_ref(struct binder_proc
*proc
,
1116 uint32_t desc
, bool need_strong_ref
)
1118 struct rb_node
*n
= proc
->refs_by_desc
.rb_node
;
1119 struct binder_ref
*ref
;
1122 ref
= rb_entry(n
, struct binder_ref
, rb_node_desc
);
1124 if (desc
< ref
->desc
) {
1126 } else if (desc
> ref
->desc
) {
1128 } else if (need_strong_ref
&& !ref
->strong
) {
1129 binder_user_error("tried to use weak ref as strong ref\n");
1138 static struct binder_ref
*binder_get_ref_for_node(struct binder_proc
*proc
,
1139 struct binder_node
*node
)
1142 struct rb_node
**p
= &proc
->refs_by_node
.rb_node
;
1143 struct rb_node
*parent
= NULL
;
1144 struct binder_ref
*ref
, *new_ref
;
1145 struct binder_context
*context
= proc
->context
;
1149 ref
= rb_entry(parent
, struct binder_ref
, rb_node_node
);
1151 if (node
< ref
->node
)
1153 else if (node
> ref
->node
)
1154 p
= &(*p
)->rb_right
;
1158 new_ref
= kzalloc_preempt_disabled(sizeof(*ref
));
1159 if (new_ref
== NULL
)
1161 binder_stats_created(BINDER_STAT_REF
);
1162 new_ref
->debug_id
= ++binder_last_id
;
1163 new_ref
->proc
= proc
;
1164 new_ref
->node
= node
;
1165 rb_link_node(&new_ref
->rb_node_node
, parent
, p
);
1166 rb_insert_color(&new_ref
->rb_node_node
, &proc
->refs_by_node
);
1168 new_ref
->desc
= (node
== context
->binder_context_mgr_node
) ? 0 : 1;
1169 for (n
= rb_first(&proc
->refs_by_desc
); n
!= NULL
; n
= rb_next(n
)) {
1170 ref
= rb_entry(n
, struct binder_ref
, rb_node_desc
);
1171 if (ref
->desc
> new_ref
->desc
)
1173 new_ref
->desc
= ref
->desc
+ 1;
1176 p
= &proc
->refs_by_desc
.rb_node
;
1179 ref
= rb_entry(parent
, struct binder_ref
, rb_node_desc
);
1181 if (new_ref
->desc
< ref
->desc
)
1183 else if (new_ref
->desc
> ref
->desc
)
1184 p
= &(*p
)->rb_right
;
1188 rb_link_node(&new_ref
->rb_node_desc
, parent
, p
);
1189 rb_insert_color(&new_ref
->rb_node_desc
, &proc
->refs_by_desc
);
1191 hlist_add_head(&new_ref
->node_entry
, &node
->refs
);
1193 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
1194 "%d new ref %d desc %d for node %d\n",
1195 proc
->pid
, new_ref
->debug_id
, new_ref
->desc
,
1198 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
1199 "%d new ref %d desc %d for dead node\n",
1200 proc
->pid
, new_ref
->debug_id
, new_ref
->desc
);
1205 static void binder_delete_ref(struct binder_ref
*ref
)
1207 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
1208 "%d delete ref %d desc %d for node %d\n",
1209 ref
->proc
->pid
, ref
->debug_id
, ref
->desc
,
1210 ref
->node
->debug_id
);
1212 rb_erase(&ref
->rb_node_desc
, &ref
->proc
->refs_by_desc
);
1213 rb_erase(&ref
->rb_node_node
, &ref
->proc
->refs_by_node
);
1215 binder_dec_node(ref
->node
, 1, 1);
1216 hlist_del(&ref
->node_entry
);
1217 binder_dec_node(ref
->node
, 0, 1);
1219 binder_debug(BINDER_DEBUG_DEAD_BINDER
,
1220 "%d delete ref %d desc %d has death notification\n",
1221 ref
->proc
->pid
, ref
->debug_id
, ref
->desc
);
1222 list_del(&ref
->death
->work
.entry
);
1224 binder_stats_deleted(BINDER_STAT_DEATH
);
1227 binder_stats_deleted(BINDER_STAT_REF
);
1230 static int binder_inc_ref(struct binder_ref
*ref
, int strong
,
1231 struct list_head
*target_list
)
1236 if (ref
->strong
== 0) {
1237 ret
= binder_inc_node(ref
->node
, 1, 1, target_list
);
1243 if (ref
->weak
== 0) {
1244 ret
= binder_inc_node(ref
->node
, 0, 1, target_list
);
1254 static int binder_dec_ref(struct binder_ref
**ptr_to_ref
, int strong
)
1256 struct binder_ref
*ref
= *ptr_to_ref
;
1258 if (ref
->strong
== 0) {
1259 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1260 ref
->proc
->pid
, ref
->debug_id
,
1261 ref
->desc
, ref
->strong
, ref
->weak
);
1265 if (ref
->strong
== 0) {
1268 ret
= binder_dec_node(ref
->node
, strong
, 1);
1273 if (ref
->weak
== 0) {
1274 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1275 ref
->proc
->pid
, ref
->debug_id
,
1276 ref
->desc
, ref
->strong
, ref
->weak
);
1281 if (ref
->strong
== 0 && ref
->weak
== 0) {
1282 binder_delete_ref(ref
);
1288 static void binder_pop_transaction(struct binder_thread
*target_thread
,
1289 struct binder_transaction
*t
)
1291 if (target_thread
) {
1292 BUG_ON(target_thread
->transaction_stack
!= t
);
1293 BUG_ON(target_thread
->transaction_stack
->from
!= target_thread
);
1294 target_thread
->transaction_stack
=
1295 target_thread
->transaction_stack
->from_parent
;
1300 t
->buffer
->transaction
= NULL
;
1302 binder_stats_deleted(BINDER_STAT_TRANSACTION
);
1305 static void binder_send_failed_reply(struct binder_transaction
*t
,
1306 uint32_t error_code
)
1308 struct binder_thread
*target_thread
;
1309 struct binder_transaction
*next
;
1311 BUG_ON(t
->flags
& TF_ONE_WAY
);
1313 target_thread
= t
->from
;
1314 if (target_thread
) {
1315 if (target_thread
->return_error
!= BR_OK
&&
1316 target_thread
->return_error2
== BR_OK
) {
1317 target_thread
->return_error2
=
1318 target_thread
->return_error
;
1319 target_thread
->return_error
= BR_OK
;
1321 if (target_thread
->return_error
== BR_OK
) {
1322 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION
,
1323 "send failed reply for transaction %d to %d:%d\n",
1325 target_thread
->proc
->pid
,
1326 target_thread
->pid
);
1328 binder_pop_transaction(target_thread
, t
);
1329 target_thread
->return_error
= error_code
;
1330 wake_up_interruptible(&target_thread
->wait
);
1332 pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
1333 target_thread
->proc
->pid
,
1335 target_thread
->return_error
);
1339 next
= t
->from_parent
;
1341 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION
,
1342 "send failed reply for transaction %d, target dead\n",
1345 binder_pop_transaction(target_thread
, t
);
1347 binder_debug(BINDER_DEBUG_DEAD_BINDER
,
1348 "reply failed, no target thread at root\n");
1352 binder_debug(BINDER_DEBUG_DEAD_BINDER
,
1353 "reply failed, no target thread -- retry %d\n",
/**
 * binder_validate_object() - checks for a valid metadata object in a buffer.
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the buffer at which to validate an object.
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero.
 */
1366 static size_t binder_validate_object(struct binder_buffer
*buffer
, u64 offset
)
1368 /* Check if we can read a header first */
1369 struct binder_object_header
*hdr
;
1370 size_t object_size
= 0;
1372 if (offset
> buffer
->data_size
- sizeof(*hdr
) ||
1373 buffer
->data_size
< sizeof(*hdr
) ||
1374 !IS_ALIGNED(offset
, sizeof(u32
)))
1377 /* Ok, now see if we can read a complete object. */
1378 hdr
= (struct binder_object_header
*)(buffer
->data
+ offset
);
1379 switch (hdr
->type
) {
1380 case BINDER_TYPE_BINDER
:
1381 case BINDER_TYPE_WEAK_BINDER
:
1382 case BINDER_TYPE_HANDLE
:
1383 case BINDER_TYPE_WEAK_HANDLE
:
1384 object_size
= sizeof(struct flat_binder_object
);
1386 case BINDER_TYPE_FD
:
1387 object_size
= sizeof(struct binder_fd_object
);
1389 case BINDER_TYPE_PTR
:
1390 object_size
= sizeof(struct binder_buffer_object
);
1392 case BINDER_TYPE_FDA
:
1393 object_size
= sizeof(struct binder_fd_array_object
);
1398 if (offset
<= buffer
->data_size
- object_size
&&
1399 buffer
->data_size
>= object_size
)
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b:		binder_buffer containing the object
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start:	points to the start of the offset array
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 */
1422 static struct binder_buffer_object
*binder_validate_ptr(struct binder_buffer
*b
,
1423 binder_size_t index
,
1424 binder_size_t
*start
,
1425 binder_size_t num_valid
)
1427 struct binder_buffer_object
*buffer_obj
;
1428 binder_size_t
*offp
;
1430 if (index
>= num_valid
)
1433 offp
= start
+ index
;
1434 buffer_obj
= (struct binder_buffer_object
*)(b
->data
+ *offp
);
1435 if (buffer_obj
->hdr
.type
!= BINDER_TYPE_PTR
)
/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b:			transaction buffer
 * @objects_start	start of objects buffer
 * @buffer:		binder_buffer_object in which to fix up
 * @offset:		start offset in @buffer to fix up
 * @last_obj:		last binder_buffer_object that we fixed up in
 * @last_min_offset:	minimum fixup offset in @last_obj
 *
 * Return:		%true if a fixup in buffer @buffer at offset @offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *   D (parent = B, offset = 0) // B is not A or any of A's parents
 */
1479 static bool binder_validate_fixup(struct binder_buffer
*b
,
1480 binder_size_t
*objects_start
,
1481 struct binder_buffer_object
*buffer
,
1482 binder_size_t fixup_offset
,
1483 struct binder_buffer_object
*last_obj
,
1484 binder_size_t last_min_offset
)
1487 /* Nothing to fix up in */
1491 while (last_obj
!= buffer
) {
1493 * Safe to retrieve the parent of last_obj, since it
1494 * was already previously verified by the driver.
1496 if ((last_obj
->flags
& BINDER_BUFFER_FLAG_HAS_PARENT
) == 0)
1498 last_min_offset
= last_obj
->parent_offset
+ sizeof(uintptr_t);
1499 last_obj
= (struct binder_buffer_object
*)
1500 (b
->data
+ *(objects_start
+ last_obj
->parent
));
1502 return (fixup_offset
>= last_min_offset
);
1505 static void binder_transaction_buffer_release(struct binder_proc
*proc
,
1506 struct binder_buffer
*buffer
,
1507 binder_size_t
*failed_at
)
1509 binder_size_t
*offp
, *off_start
, *off_end
;
1510 int debug_id
= buffer
->debug_id
;
1512 binder_debug(BINDER_DEBUG_TRANSACTION
,
1513 "%d buffer release %d, size %zd-%zd, failed at %pK\n",
1514 proc
->pid
, buffer
->debug_id
,
1515 buffer
->data_size
, buffer
->offsets_size
, failed_at
);
1517 if (buffer
->target_node
)
1518 binder_dec_node(buffer
->target_node
, 1, 0);
1520 off_start
= (binder_size_t
*)(buffer
->data
+
1521 ALIGN(buffer
->data_size
, sizeof(void *)));
1523 off_end
= failed_at
;
1525 off_end
= (void *)off_start
+ buffer
->offsets_size
;
1526 for (offp
= off_start
; offp
< off_end
; offp
++) {
1527 struct binder_object_header
*hdr
;
1528 size_t object_size
= binder_validate_object(buffer
, *offp
);
1530 if (object_size
== 0) {
1531 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
1532 debug_id
, (u64
)*offp
, buffer
->data_size
);
1535 hdr
= (struct binder_object_header
*)(buffer
->data
+ *offp
);
1536 switch (hdr
->type
) {
1537 case BINDER_TYPE_BINDER
:
1538 case BINDER_TYPE_WEAK_BINDER
: {
1539 struct flat_binder_object
*fp
;
1540 struct binder_node
*node
;
1542 fp
= to_flat_binder_object(hdr
);
1543 node
= binder_get_node(proc
, fp
->binder
);
1545 pr_err("transaction release %d bad node %016llx\n",
1546 debug_id
, (u64
)fp
->binder
);
1549 binder_debug(BINDER_DEBUG_TRANSACTION
,
1550 " node %d u%016llx\n",
1551 node
->debug_id
, (u64
)node
->ptr
);
1552 binder_dec_node(node
, hdr
->type
== BINDER_TYPE_BINDER
,
1555 case BINDER_TYPE_HANDLE
:
1556 case BINDER_TYPE_WEAK_HANDLE
: {
1557 struct flat_binder_object
*fp
;
1558 struct binder_ref
*ref
;
1560 fp
= to_flat_binder_object(hdr
);
1561 ref
= binder_get_ref(proc
, fp
->handle
,
1562 hdr
->type
== BINDER_TYPE_HANDLE
);
1564 pr_err("transaction release %d bad handle %d\n",
1565 debug_id
, fp
->handle
);
1568 binder_debug(BINDER_DEBUG_TRANSACTION
,
1569 " ref %d desc %d (node %d)\n",
1570 ref
->debug_id
, ref
->desc
, ref
->node
->debug_id
);
1571 binder_dec_ref(&ref
, hdr
->type
== BINDER_TYPE_HANDLE
);
1574 case BINDER_TYPE_FD
: {
1575 struct binder_fd_object
*fp
= to_binder_fd_object(hdr
);
1577 binder_debug(BINDER_DEBUG_TRANSACTION
,
1578 " fd %d\n", fp
->fd
);
1580 task_close_fd(proc
, fp
->fd
);
1582 case BINDER_TYPE_PTR
:
1584 * Nothing to do here, this will get cleaned up when the
1585 * transaction buffer gets freed
1588 case BINDER_TYPE_FDA
: {
1589 struct binder_fd_array_object
*fda
;
1590 struct binder_buffer_object
*parent
;
1591 uintptr_t parent_buffer
;
1594 binder_size_t fd_buf_size
;
1596 fda
= to_binder_fd_array_object(hdr
);
1597 parent
= binder_validate_ptr(buffer
, fda
->parent
,
1601 pr_err("transaction release %d bad parent offset",
1606 * Since the parent was already fixed up, convert it
1607 * back to kernel address space to access it
1609 parent_buffer
= parent
->buffer
-
1610 proc
->user_buffer_offset
;
1612 fd_buf_size
= sizeof(u32
) * fda
->num_fds
;
1613 if (fda
->num_fds
>= SIZE_MAX
/ sizeof(u32
)) {
1614 pr_err("transaction release %d invalid number of fds (%lld)\n",
1615 debug_id
, (u64
)fda
->num_fds
);
1618 if (fd_buf_size
> parent
->length
||
1619 fda
->parent_offset
> parent
->length
- fd_buf_size
) {
1620 /* No space for all file descriptors here. */
1621 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
1622 debug_id
, (u64
)fda
->num_fds
);
1625 fd_array
= (u32
*)(parent_buffer
+ fda
->parent_offset
);
1626 for (fd_index
= 0; fd_index
< fda
->num_fds
; fd_index
++)
1627 task_close_fd(proc
, fd_array
[fd_index
]);
1630 pr_err("transaction release %d bad object type %x\n",
1631 debug_id
, hdr
->type
);
1637 static int binder_translate_binder(struct flat_binder_object
*fp
,
1638 struct binder_transaction
*t
,
1639 struct binder_thread
*thread
)
1641 struct binder_node
*node
;
1642 struct binder_ref
*ref
;
1643 struct binder_proc
*proc
= thread
->proc
;
1644 struct binder_proc
*target_proc
= t
->to_proc
;
1646 node
= binder_get_node(proc
, fp
->binder
);
1648 node
= binder_new_node(proc
, fp
->binder
, fp
->cookie
);
1652 node
->min_priority
= fp
->flags
& FLAT_BINDER_FLAG_PRIORITY_MASK
;
1653 node
->accept_fds
= !!(fp
->flags
& FLAT_BINDER_FLAG_ACCEPTS_FDS
);
1655 if (fp
->cookie
!= node
->cookie
) {
1656 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
1657 proc
->pid
, thread
->pid
, (u64
)fp
->binder
,
1658 node
->debug_id
, (u64
)fp
->cookie
,
1662 if (security_binder_transfer_binder(proc
->tsk
, target_proc
->tsk
))
1665 ref
= binder_get_ref_for_node(target_proc
, node
);
1669 if (fp
->hdr
.type
== BINDER_TYPE_BINDER
)
1670 fp
->hdr
.type
= BINDER_TYPE_HANDLE
;
1672 fp
->hdr
.type
= BINDER_TYPE_WEAK_HANDLE
;
1674 fp
->handle
= ref
->desc
;
1676 binder_inc_ref(ref
, fp
->hdr
.type
== BINDER_TYPE_HANDLE
, &thread
->todo
);
1678 trace_binder_transaction_node_to_ref(t
, node
, ref
);
1679 binder_debug(BINDER_DEBUG_TRANSACTION
,
1680 " node %d u%016llx -> ref %d desc %d\n",
1681 node
->debug_id
, (u64
)node
->ptr
,
1682 ref
->debug_id
, ref
->desc
);
1687 static int binder_translate_handle(struct flat_binder_object
*fp
,
1688 struct binder_transaction
*t
,
1689 struct binder_thread
*thread
)
1691 struct binder_ref
*ref
;
1692 struct binder_proc
*proc
= thread
->proc
;
1693 struct binder_proc
*target_proc
= t
->to_proc
;
1695 ref
= binder_get_ref(proc
, fp
->handle
,
1696 fp
->hdr
.type
== BINDER_TYPE_HANDLE
);
1698 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
1699 proc
->pid
, thread
->pid
, fp
->handle
);
1702 if (security_binder_transfer_binder(proc
->tsk
, target_proc
->tsk
))
1705 if (ref
->node
->proc
== target_proc
) {
1706 if (fp
->hdr
.type
== BINDER_TYPE_HANDLE
)
1707 fp
->hdr
.type
= BINDER_TYPE_BINDER
;
1709 fp
->hdr
.type
= BINDER_TYPE_WEAK_BINDER
;
1710 fp
->binder
= ref
->node
->ptr
;
1711 fp
->cookie
= ref
->node
->cookie
;
1712 binder_inc_node(ref
->node
, fp
->hdr
.type
== BINDER_TYPE_BINDER
,
1714 trace_binder_transaction_ref_to_node(t
, ref
);
1715 binder_debug(BINDER_DEBUG_TRANSACTION
,
1716 " ref %d desc %d -> node %d u%016llx\n",
1717 ref
->debug_id
, ref
->desc
, ref
->node
->debug_id
,
1718 (u64
)ref
->node
->ptr
);
1720 struct binder_ref
*new_ref
;
1722 new_ref
= binder_get_ref_for_node(target_proc
, ref
->node
);
1727 fp
->handle
= new_ref
->desc
;
1729 binder_inc_ref(new_ref
, fp
->hdr
.type
== BINDER_TYPE_HANDLE
,
1731 trace_binder_transaction_ref_to_ref(t
, ref
, new_ref
);
1732 binder_debug(BINDER_DEBUG_TRANSACTION
,
1733 " ref %d desc %d -> ref %d desc %d (node %d)\n",
1734 ref
->debug_id
, ref
->desc
, new_ref
->debug_id
,
1735 new_ref
->desc
, ref
->node
->debug_id
);
1740 static int binder_translate_fd(int fd
,
1741 struct binder_transaction
*t
,
1742 struct binder_thread
*thread
,
1743 struct binder_transaction
*in_reply_to
)
1745 struct binder_proc
*proc
= thread
->proc
;
1746 struct binder_proc
*target_proc
= t
->to_proc
;
1750 bool target_allows_fd
;
1753 target_allows_fd
= !!(in_reply_to
->flags
& TF_ACCEPT_FDS
);
1755 target_allows_fd
= t
->buffer
->target_node
->accept_fds
;
1756 if (!target_allows_fd
) {
1757 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
1758 proc
->pid
, thread
->pid
,
1759 in_reply_to
? "reply" : "transaction",
1762 goto err_fd_not_accepted
;
1767 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
1768 proc
->pid
, thread
->pid
, fd
);
1772 ret
= security_binder_transfer_file(proc
->tsk
, target_proc
->tsk
, file
);
1778 target_fd
= task_get_unused_fd_flags(target_proc
, O_CLOEXEC
);
1779 if (target_fd
< 0) {
1781 goto err_get_unused_fd
;
1783 task_fd_install(target_proc
, target_fd
, file
);
1784 trace_binder_transaction_fd(t
, fd
, target_fd
);
1785 binder_debug(BINDER_DEBUG_TRANSACTION
, " fd %d -> %d\n",
1794 err_fd_not_accepted
:
1798 static int binder_translate_fd_array(struct binder_fd_array_object
*fda
,
1799 struct binder_buffer_object
*parent
,
1800 struct binder_transaction
*t
,
1801 struct binder_thread
*thread
,
1802 struct binder_transaction
*in_reply_to
)
1804 binder_size_t fdi
, fd_buf_size
, num_installed_fds
;
1806 uintptr_t parent_buffer
;
1808 struct binder_proc
*proc
= thread
->proc
;
1809 struct binder_proc
*target_proc
= t
->to_proc
;
1811 fd_buf_size
= sizeof(u32
) * fda
->num_fds
;
1812 if (fda
->num_fds
>= SIZE_MAX
/ sizeof(u32
)) {
1813 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
1814 proc
->pid
, thread
->pid
, (u64
)fda
->num_fds
);
1817 if (fd_buf_size
> parent
->length
||
1818 fda
->parent_offset
> parent
->length
- fd_buf_size
) {
1819 /* No space for all file descriptors here. */
1820 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
1821 proc
->pid
, thread
->pid
, (u64
)fda
->num_fds
);
1825 * Since the parent was already fixed up, convert it
1826 * back to the kernel address space to access it
1828 parent_buffer
= parent
->buffer
- target_proc
->user_buffer_offset
;
1829 fd_array
= (u32
*)(parent_buffer
+ fda
->parent_offset
);
1830 if (!IS_ALIGNED((unsigned long)fd_array
, sizeof(u32
))) {
1831 binder_user_error("%d:%d parent offset not aligned correctly.\n",
1832 proc
->pid
, thread
->pid
);
1835 for (fdi
= 0; fdi
< fda
->num_fds
; fdi
++) {
1836 target_fd
= binder_translate_fd(fd_array
[fdi
], t
, thread
,
1839 goto err_translate_fd_failed
;
1840 fd_array
[fdi
] = target_fd
;
1844 err_translate_fd_failed
:
1846 * Failed to allocate fd or security error, free fds
1849 num_installed_fds
= fdi
;
1850 for (fdi
= 0; fdi
< num_installed_fds
; fdi
++)
1851 task_close_fd(target_proc
, fd_array
[fdi
]);
1855 static int binder_fixup_parent(struct binder_transaction
*t
,
1856 struct binder_thread
*thread
,
1857 struct binder_buffer_object
*bp
,
1858 binder_size_t
*off_start
,
1859 binder_size_t num_valid
,
1860 struct binder_buffer_object
*last_fixup_obj
,
1861 binder_size_t last_fixup_min_off
)
1863 struct binder_buffer_object
*parent
;
1865 struct binder_buffer
*b
= t
->buffer
;
1866 struct binder_proc
*proc
= thread
->proc
;
1867 struct binder_proc
*target_proc
= t
->to_proc
;
1869 if (!(bp
->flags
& BINDER_BUFFER_FLAG_HAS_PARENT
))
1872 parent
= binder_validate_ptr(b
, bp
->parent
, off_start
, num_valid
);
1874 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
1875 proc
->pid
, thread
->pid
);
1879 if (!binder_validate_fixup(b
, off_start
,
1880 parent
, bp
->parent_offset
,
1882 last_fixup_min_off
)) {
1883 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
1884 proc
->pid
, thread
->pid
);
1888 if (parent
->length
< sizeof(binder_uintptr_t
) ||
1889 bp
->parent_offset
> parent
->length
- sizeof(binder_uintptr_t
)) {
1890 /* No space for a pointer here! */
1891 binder_user_error("%d:%d got transaction with invalid parent offset\n",
1892 proc
->pid
, thread
->pid
);
1895 parent_buffer
= (u8
*)(parent
->buffer
-
1896 target_proc
->user_buffer_offset
);
1897 *(binder_uintptr_t
*)(parent_buffer
+ bp
->parent_offset
) = bp
->buffer
;
1902 static void binder_transaction(struct binder_proc
*proc
,
1903 struct binder_thread
*thread
,
1904 struct binder_transaction_data
*tr
, int reply
,
1905 binder_size_t extra_buffers_size
)
1908 struct binder_transaction
*t
;
1909 struct binder_work
*tcomplete
;
1910 binder_size_t
*offp
, *off_end
, *off_start
;
1911 binder_size_t off_min
;
1912 u8
*sg_bufp
, *sg_buf_end
;
1913 struct binder_proc
*target_proc
;
1914 struct binder_thread
*target_thread
= NULL
;
1915 struct binder_node
*target_node
= NULL
;
1916 struct list_head
*target_list
;
1917 wait_queue_head_t
*target_wait
;
1918 struct binder_transaction
*in_reply_to
= NULL
;
1919 struct binder_transaction_log_entry
*e
;
1920 uint32_t return_error
;
1921 struct binder_buffer_object
*last_fixup_obj
= NULL
;
1922 binder_size_t last_fixup_min_off
= 0;
1923 struct binder_context
*context
= proc
->context
;
1925 e
= binder_transaction_log_add(&binder_transaction_log
);
1926 e
->call_type
= reply
? 2 : !!(tr
->flags
& TF_ONE_WAY
);
1927 e
->from_proc
= proc
->pid
;
1928 e
->from_thread
= thread
->pid
;
1929 e
->target_handle
= tr
->target
.handle
;
1930 e
->data_size
= tr
->data_size
;
1931 e
->offsets_size
= tr
->offsets_size
;
1932 e
->context_name
= proc
->context
->name
;
1935 in_reply_to
= thread
->transaction_stack
;
1936 if (in_reply_to
== NULL
) {
1937 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
1938 proc
->pid
, thread
->pid
);
1939 return_error
= BR_FAILED_REPLY
;
1940 goto err_empty_call_stack
;
1942 binder_set_nice(in_reply_to
->saved_priority
);
1943 if (in_reply_to
->to_thread
!= thread
) {
1944 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
1945 proc
->pid
, thread
->pid
, in_reply_to
->debug_id
,
1946 in_reply_to
->to_proc
?
1947 in_reply_to
->to_proc
->pid
: 0,
1948 in_reply_to
->to_thread
?
1949 in_reply_to
->to_thread
->pid
: 0);
1950 return_error
= BR_FAILED_REPLY
;
1952 goto err_bad_call_stack
;
1954 thread
->transaction_stack
= in_reply_to
->to_parent
;
1955 target_thread
= in_reply_to
->from
;
1956 if (target_thread
== NULL
) {
1957 return_error
= BR_DEAD_REPLY
;
1958 goto err_dead_binder
;
1960 if (target_thread
->transaction_stack
!= in_reply_to
) {
1961 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
1962 proc
->pid
, thread
->pid
,
1963 target_thread
->transaction_stack
?
1964 target_thread
->transaction_stack
->debug_id
: 0,
1965 in_reply_to
->debug_id
);
1966 return_error
= BR_FAILED_REPLY
;
1968 target_thread
= NULL
;
1969 goto err_dead_binder
;
1971 target_proc
= target_thread
->proc
;
1973 if (tr
->target
.handle
) {
1974 struct binder_ref
*ref
;
1976 ref
= binder_get_ref(proc
, tr
->target
.handle
, true);
1978 binder_user_error("%d:%d got transaction to invalid handle\n",
1979 proc
->pid
, thread
->pid
);
1980 return_error
= BR_FAILED_REPLY
;
1981 goto err_invalid_target_handle
;
1983 target_node
= ref
->node
;
1985 target_node
= context
->binder_context_mgr_node
;
1986 if (target_node
== NULL
) {
1987 return_error
= BR_DEAD_REPLY
;
1988 goto err_no_context_mgr_node
;
1991 e
->to_node
= target_node
->debug_id
;
1992 target_proc
= target_node
->proc
;
1993 if (target_proc
== NULL
) {
1994 return_error
= BR_DEAD_REPLY
;
1995 goto err_dead_binder
;
1997 if (security_binder_transaction(proc
->tsk
, target_proc
->tsk
) < 0) {
1998 return_error
= BR_FAILED_REPLY
;
1999 goto err_invalid_target_handle
;
2001 if (!(tr
->flags
& TF_ONE_WAY
) && thread
->transaction_stack
) {
2002 struct binder_transaction
*tmp
;
2004 tmp
= thread
->transaction_stack
;
2005 if (tmp
->to_thread
!= thread
) {
2006 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2007 proc
->pid
, thread
->pid
, tmp
->debug_id
,
2008 tmp
->to_proc
? tmp
->to_proc
->pid
: 0,
2010 tmp
->to_thread
->pid
: 0);
2011 return_error
= BR_FAILED_REPLY
;
2012 goto err_bad_call_stack
;
2015 if (tmp
->from
&& tmp
->from
->proc
== target_proc
)
2016 target_thread
= tmp
->from
;
2017 tmp
= tmp
->from_parent
;
2021 if (target_thread
) {
2022 e
->to_thread
= target_thread
->pid
;
2023 target_list
= &target_thread
->todo
;
2024 target_wait
= &target_thread
->wait
;
2026 target_list
= &target_proc
->todo
;
2027 target_wait
= &target_proc
->wait
;
2029 e
->to_proc
= target_proc
->pid
;
2031 /* TODO: reuse incoming transaction for reply */
2032 t
= kzalloc_preempt_disabled(sizeof(*t
));
2034 return_error
= BR_FAILED_REPLY
;
2035 goto err_alloc_t_failed
;
2037 binder_stats_created(BINDER_STAT_TRANSACTION
);
2039 tcomplete
= kzalloc_preempt_disabled(sizeof(*tcomplete
));
2040 if (tcomplete
== NULL
) {
2041 return_error
= BR_FAILED_REPLY
;
2042 goto err_alloc_tcomplete_failed
;
2044 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE
);
2046 t
->debug_id
= ++binder_last_id
;
2047 e
->debug_id
= t
->debug_id
;
2050 binder_debug(BINDER_DEBUG_TRANSACTION
,
2051 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2052 proc
->pid
, thread
->pid
, t
->debug_id
,
2053 target_proc
->pid
, target_thread
->pid
,
2054 (u64
)tr
->data
.ptr
.buffer
,
2055 (u64
)tr
->data
.ptr
.offsets
,
2056 (u64
)tr
->data_size
, (u64
)tr
->offsets_size
,
2057 (u64
)extra_buffers_size
);
2059 binder_debug(BINDER_DEBUG_TRANSACTION
,
2060 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2061 proc
->pid
, thread
->pid
, t
->debug_id
,
2062 target_proc
->pid
, target_node
->debug_id
,
2063 (u64
)tr
->data
.ptr
.buffer
,
2064 (u64
)tr
->data
.ptr
.offsets
,
2065 (u64
)tr
->data_size
, (u64
)tr
->offsets_size
,
2066 (u64
)extra_buffers_size
);
2068 if (!reply
&& !(tr
->flags
& TF_ONE_WAY
))
2072 t
->sender_euid
= task_euid(proc
->tsk
);
2073 t
->to_proc
= target_proc
;
2074 t
->to_thread
= target_thread
;
2076 t
->flags
= tr
->flags
;
2077 t
->priority
= task_nice(current
);
2079 trace_binder_transaction(reply
, t
, target_node
);
2081 t
->buffer
= binder_alloc_buf(target_proc
, tr
->data_size
,
2082 tr
->offsets_size
, extra_buffers_size
,
2083 !reply
&& (t
->flags
& TF_ONE_WAY
));
2084 if (t
->buffer
== NULL
) {
2085 return_error
= BR_FAILED_REPLY
;
2086 goto err_binder_alloc_buf_failed
;
2088 t
->buffer
->allow_user_free
= 0;
2089 t
->buffer
->debug_id
= t
->debug_id
;
2090 t
->buffer
->transaction
= t
;
2091 t
->buffer
->target_node
= target_node
;
2092 trace_binder_transaction_alloc_buf(t
->buffer
);
2094 binder_inc_node(target_node
, 1, 0, NULL
);
2096 off_start
= (binder_size_t
*)(t
->buffer
->data
+
2097 ALIGN(tr
->data_size
, sizeof(void *)));
2100 if (copy_from_user_preempt_disabled(t
->buffer
->data
, (const void __user
*)(uintptr_t)
2101 tr
->data
.ptr
.buffer
, tr
->data_size
)) {
2102 binder_user_error("%d:%d got transaction with invalid data ptr\n",
2103 proc
->pid
, thread
->pid
);
2104 return_error
= BR_FAILED_REPLY
;
2105 goto err_copy_data_failed
;
2107 if (copy_from_user_preempt_disabled(offp
, (const void __user
*)(uintptr_t)
2108 tr
->data
.ptr
.offsets
, tr
->offsets_size
)) {
2109 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2110 proc
->pid
, thread
->pid
);
2111 return_error
= BR_FAILED_REPLY
;
2112 goto err_copy_data_failed
;
2114 if (!IS_ALIGNED(tr
->offsets_size
, sizeof(binder_size_t
))) {
2115 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2116 proc
->pid
, thread
->pid
, (u64
)tr
->offsets_size
);
2117 return_error
= BR_FAILED_REPLY
;
2118 goto err_bad_offset
;
2120 if (!IS_ALIGNED(extra_buffers_size
, sizeof(u64
))) {
2121 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
2122 proc
->pid
, thread
->pid
,
2123 extra_buffers_size
);
2124 return_error
= BR_FAILED_REPLY
;
2125 goto err_bad_offset
;
2127 off_end
= (void *)off_start
+ tr
->offsets_size
;
2128 sg_bufp
= (u8
*)(PTR_ALIGN(off_end
, sizeof(void *)));
2129 sg_buf_end
= sg_bufp
+ extra_buffers_size
;
2131 for (; offp
< off_end
; offp
++) {
2132 struct binder_object_header
*hdr
;
2133 size_t object_size
= binder_validate_object(t
->buffer
, *offp
);
2135 if (object_size
== 0 || *offp
< off_min
) {
2136 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
2137 proc
->pid
, thread
->pid
, (u64
)*offp
,
2139 (u64
)t
->buffer
->data_size
);
2140 return_error
= BR_FAILED_REPLY
;
2141 goto err_bad_offset
;
2144 hdr
= (struct binder_object_header
*)(t
->buffer
->data
+ *offp
);
2145 off_min
= *offp
+ object_size
;
2146 switch (hdr
->type
) {
2147 case BINDER_TYPE_BINDER
:
2148 case BINDER_TYPE_WEAK_BINDER
: {
2149 struct flat_binder_object
*fp
;
2151 fp
= to_flat_binder_object(hdr
);
2152 ret
= binder_translate_binder(fp
, t
, thread
);
2154 return_error
= BR_FAILED_REPLY
;
2155 goto err_translate_failed
;
2158 case BINDER_TYPE_HANDLE
:
2159 case BINDER_TYPE_WEAK_HANDLE
: {
2160 struct flat_binder_object
*fp
;
2162 fp
= to_flat_binder_object(hdr
);
2163 ret
= binder_translate_handle(fp
, t
, thread
);
2165 return_error
= BR_FAILED_REPLY
;
2166 goto err_translate_failed
;
2170 case BINDER_TYPE_FD
: {
2171 struct binder_fd_object
*fp
= to_binder_fd_object(hdr
);
2172 int target_fd
= binder_translate_fd(fp
->fd
, t
, thread
,
2175 if (target_fd
< 0) {
2176 return_error
= BR_FAILED_REPLY
;
2177 goto err_translate_failed
;
2182 case BINDER_TYPE_FDA
: {
2183 struct binder_fd_array_object
*fda
=
2184 to_binder_fd_array_object(hdr
);
2185 struct binder_buffer_object
*parent
=
2186 binder_validate_ptr(t
->buffer
, fda
->parent
,
2190 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2191 proc
->pid
, thread
->pid
);
2192 return_error
= BR_FAILED_REPLY
;
2193 goto err_bad_parent
;
2195 if (!binder_validate_fixup(t
->buffer
, off_start
,
2196 parent
, fda
->parent_offset
,
2198 last_fixup_min_off
)) {
2199 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2200 proc
->pid
, thread
->pid
);
2201 return_error
= BR_FAILED_REPLY
;
2202 goto err_bad_parent
;
2204 ret
= binder_translate_fd_array(fda
, parent
, t
, thread
,
2207 return_error
= BR_FAILED_REPLY
;
2208 goto err_translate_failed
;
2210 last_fixup_obj
= parent
;
2211 last_fixup_min_off
=
2212 fda
->parent_offset
+ sizeof(u32
) * fda
->num_fds
;
2214 case BINDER_TYPE_PTR
: {
2215 struct binder_buffer_object
*bp
=
2216 to_binder_buffer_object(hdr
);
2217 size_t buf_left
= sg_buf_end
- sg_bufp
;
2219 if (bp
->length
> buf_left
) {
2220 binder_user_error("%d:%d got transaction with too large buffer\n",
2221 proc
->pid
, thread
->pid
);
2222 return_error
= BR_FAILED_REPLY
;
2223 goto err_bad_offset
;
2225 if (copy_from_user_preempt_disabled(
2227 (const void __user
*)(uintptr_t)
2228 bp
->buffer
, bp
->length
)) {
2229 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2230 proc
->pid
, thread
->pid
);
2231 return_error
= BR_FAILED_REPLY
;
2232 goto err_copy_data_failed
;
2234 /* Fixup buffer pointer to target proc address space */
2235 bp
->buffer
= (uintptr_t)sg_bufp
+
2236 target_proc
->user_buffer_offset
;
2237 sg_bufp
+= ALIGN(bp
->length
, sizeof(u64
));
2239 ret
= binder_fixup_parent(t
, thread
, bp
, off_start
,
2242 last_fixup_min_off
);
2244 return_error
= BR_FAILED_REPLY
;
2245 goto err_translate_failed
;
2247 last_fixup_obj
= bp
;
2248 last_fixup_min_off
= 0;
2251 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
2252 proc
->pid
, thread
->pid
, hdr
->type
);
2253 return_error
= BR_FAILED_REPLY
;
2254 goto err_bad_object_type
;
2258 BUG_ON(t
->buffer
->async_transaction
!= 0);
2259 binder_pop_transaction(target_thread
, in_reply_to
);
2260 } else if (!(t
->flags
& TF_ONE_WAY
)) {
2261 BUG_ON(t
->buffer
->async_transaction
!= 0);
2263 t
->from_parent
= thread
->transaction_stack
;
2264 thread
->transaction_stack
= t
;
2266 BUG_ON(target_node
== NULL
);
2267 BUG_ON(t
->buffer
->async_transaction
!= 1);
2268 if (target_node
->has_async_transaction
) {
2269 target_list
= &target_node
->async_todo
;
2272 target_node
->has_async_transaction
= 1;
2274 t
->work
.type
= BINDER_WORK_TRANSACTION
;
2275 list_add_tail(&t
->work
.entry
, target_list
);
2276 tcomplete
->type
= BINDER_WORK_TRANSACTION_COMPLETE
;
2277 list_add_tail(&tcomplete
->entry
, &thread
->todo
);
2279 if (reply
|| !(t
->flags
& TF_ONE_WAY
)) {
2280 wake_up_interruptible_sync(target_wait
);
2282 wake_up_interruptible(target_wait
);
2287 err_translate_failed
:
2288 err_bad_object_type
:
2291 err_copy_data_failed
:
2292 trace_binder_transaction_failed_buffer_release(t
->buffer
);
2293 binder_transaction_buffer_release(target_proc
, t
->buffer
, offp
);
2294 t
->buffer
->transaction
= NULL
;
2295 binder_free_buf(target_proc
, t
->buffer
);
2296 err_binder_alloc_buf_failed
:
2298 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE
);
2299 err_alloc_tcomplete_failed
:
2301 binder_stats_deleted(BINDER_STAT_TRANSACTION
);
2304 err_empty_call_stack
:
2306 err_invalid_target_handle
:
2307 err_no_context_mgr_node
:
2308 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION
,
2309 "%d:%d transaction failed %d, size %lld-%lld\n",
2310 proc
->pid
, thread
->pid
, return_error
,
2311 (u64
)tr
->data_size
, (u64
)tr
->offsets_size
);
2314 struct binder_transaction_log_entry
*fe
;
2316 fe
= binder_transaction_log_add(&binder_transaction_log_failed
);
2320 BUG_ON(thread
->return_error
!= BR_OK
);
2322 thread
->return_error
= BR_TRANSACTION_COMPLETE
;
2323 binder_send_failed_reply(in_reply_to
, return_error
);
2325 thread
->return_error
= return_error
;
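
/*
 * binder_thread_write() - consume BC_* commands from the user-space
 * write buffer and advance *consumed past each command handled.
 */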
int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error == BR_OK) {
		if (get_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			binder_stats.bc[_IOC_NR(cmd)]++;
			proc->stats.bc[_IOC_NR(cmd)]++;
			thread->stats.bc[_IOC_NR(cmd)]++;
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			struct binder_ref *ref;
			const char *debug_string;

			if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (target == 0 && context->binder_context_mgr_node &&
			    (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
				ref = binder_get_ref_for_node(proc,
					context->binder_context_mgr_node);
				if (ref->desc != target) {
					binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
						proc->pid, thread->pid,
						ref->desc);
				}
			} else
				ref = binder_get_ref(proc, target,
						     cmd == BC_ACQUIRE ||
						     cmd == BC_RELEASE);
			if (ref == NULL) {
				binder_user_error("%d:%d refcount change on invalid ref %d\n",
					proc->pid, thread->pid, target);
				break;
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				binder_inc_ref(ref, 0, NULL);
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				binder_inc_ref(ref, 1, NULL);
				break;
			case BC_RELEASE:
				debug_string = "Release";
				binder_dec_ref(&ref, 1);
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				binder_dec_ref(&ref, 0);
				break;
			}
			if (ref == NULL) {
				binder_debug(BINDER_DEBUG_USER_REFS,
					"binder: %d:%d %s ref deleted",
					proc->pid, thread->pid, debug_string);
			} else {
				binder_debug(BINDER_DEBUG_USER_REFS,
					"binder: %d:%d %s ref %d desc %d s %d w %d for node %d\n",
					proc->pid, thread->pid, debug_string,
					ref->debug_id, ref->desc, ref->strong,
					ref->weak, ref->node->debug_id);
			}
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;

			if (get_user_preempt_disabled(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					(u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					(u64)node_ptr, node->debug_id,
					(u64)cookie, (u64)node->cookie);
				break;
			}
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					break;
				}
				node->pending_weak_ref = 0;
			}
			binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs, node->local_weak_refs);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user_preempt_disabled(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_buffer_lookup(proc, data_ptr);
			if (buffer == NULL) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			if (!buffer->allow_user_free) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");

			if (buffer->transaction) {
				buffer->transaction->buffer = NULL;
				buffer->transaction = NULL;
			}
			if (buffer->async_transaction && buffer->target_node) {
				BUG_ON(!buffer->target_node->has_async_transaction);
				if (list_empty(&buffer->target_node->async_todo))
					buffer->target_node->has_async_transaction = 0;
				else
					list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
			}
			trace_binder_transaction_buffer_release(buffer);
			binder_transaction_buffer_release(proc, buffer, NULL);
			binder_free_buf(proc, buffer);
			break;
		}

		case BC_TRANSACTION_SG:
		case BC_REPLY_SG: {
			struct binder_transaction_data_sg tr;

			if (copy_from_user_preempt_disabled(&tr, ptr,
							    sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr.transaction_data,
					   cmd == BC_REPLY_SG, tr.buffers_size);
			break;
		}
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user_preempt_disabled(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}

		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death;

			if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			ref = binder_get_ref(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->debug_id, ref->desc,
				     ref->strong, ref->weak, ref->node->debug_id);

			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
						proc->pid, thread->pid);
					break;
				}
				death = kzalloc_preempt_disabled(sizeof(*death));
				if (death == NULL) {
					thread->return_error = BR_ERROR;
					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
						     "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						     proc->pid, thread->pid);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
						list_add_tail(&ref->death->work.entry, &thread->todo);
					} else {
						list_add_tail(&ref->death->work.entry, &proc->todo);
						wake_up_interruptible(&proc->wait);
					}
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
						proc->pid, thread->pid);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
						proc->pid, thread->pid,
						(u64)death->cookie,
						(u64)cookie);
					break;
				}
				ref->death = NULL;
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
						list_add_tail(&death->work.entry, &thread->todo);
					} else {
						list_add_tail(&death->work.entry, &proc->todo);
						wake_up_interruptible(&proc->wait);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
			}
		} break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			list_for_each_entry(w, &proc->delivered_death, entry) {
				struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
				     proc->pid, thread->pid, (u64)cookie, death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
					proc->pid, thread->pid, (u64)cookie);
				break;
			}

			list_del_init(&death->work.entry);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
					list_add_tail(&death->work.entry, &thread->todo);
				} else {
					list_add_tail(&death->work.entry, &proc->todo);
					wake_up_interruptible(&proc->wait);
				}
			}
		} break;

		default:
			pr_err("%d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}
static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		binder_stats.br[_IOC_NR(cmd)]++;
		proc->stats.br[_IOC_NR(cmd)]++;
		thread->stats.br[_IOC_NR(cmd)]++;
	}
}
static int binder_has_proc_work(struct binder_proc *proc,
				struct binder_thread *thread)
{
	return !list_empty(&proc->todo) ||
		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}

static int binder_has_thread_work(struct binder_thread *thread)
{
	return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
		(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
}
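
/*
 * binder_thread_read() - fill the user-space read buffer with BR_*
 * return codes and work items; blocks (unless non_block) until this
 * thread or its process has work to deliver.
 */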
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user_preempt_disabled(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	wait_for_proc_work = thread->transaction_stack == NULL &&
				list_empty(&thread->todo);

	if (thread->return_error != BR_OK && ptr < end) {
		if (thread->return_error2 != BR_OK) {
			if (put_user_preempt_disabled(thread->return_error2, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			binder_stat_br(proc, thread, thread->return_error2);
			if (ptr == end)
				goto done;
			thread->return_error2 = BR_OK;
		}
		if (put_user_preempt_disabled(thread->return_error, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		binder_stat_br(proc, thread, thread->return_error);
		thread->return_error = BR_OK;
		goto done;
	}

	thread->looper |= BINDER_LOOPER_STATE_WAITING;
	if (wait_for_proc_work)
		proc->ready_threads++;

	binder_unlock(__func__);

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !list_empty(&thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_set_nice(proc->default_priority);
		if (non_block) {
			if (!binder_has_proc_work(proc, thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
	} else {
		if (non_block) {
			if (!binder_has_thread_work(thread))
				ret = -EAGAIN;
		} else
			ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
	}

	binder_lock(__func__);

	if (wait_for_proc_work)
		proc->ready_threads--;
	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w;
		struct binder_transaction *t = NULL;

		if (!list_empty(&thread->todo)) {
			w = list_first_entry(&thread->todo, struct binder_work,
					     entry);
		} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
			w = list_first_entry(&proc->todo, struct binder_work,
					     entry);
		} else {
			/* no data added */
			if (ptr - buffer == 4 &&
			    !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4)
			break;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);

			list_del(&w->entry);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			uint32_t cmd = BR_NOOP;
			const char *cmd_name;
			int strong = node->internal_strong_refs || node->local_strong_refs;
			int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;

			if (weak && !node->has_weak_ref) {
				cmd = BR_INCREFS;
				cmd_name = "BR_INCREFS";
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			} else if (strong && !node->has_strong_ref) {
				cmd = BR_ACQUIRE;
				cmd_name = "BR_ACQUIRE";
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			} else if (!strong && node->has_strong_ref) {
				cmd = BR_RELEASE;
				cmd_name = "BR_RELEASE";
				node->has_strong_ref = 0;
			} else if (!weak && node->has_weak_ref) {
				cmd = BR_DECREFS;
				cmd_name = "BR_DECREFS";
				node->has_weak_ref = 0;
			}
			if (cmd != BR_NOOP) {
				if (put_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(uint32_t);
				if (put_user_preempt_disabled(node->ptr,
					     (binder_uintptr_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(binder_uintptr_t);
				if (put_user_preempt_disabled(node->cookie,
					     (binder_uintptr_t __user *)ptr))
					return -EFAULT;
				ptr += sizeof(binder_uintptr_t);

				binder_stat_br(proc, thread, cmd);
				binder_debug(BINDER_DEBUG_USER_REFS,
					     "%d:%d %s %d u%016llx c%016llx\n",
					     proc->pid, thread->pid, cmd_name,
					     node->debug_id,
					     (u64)node->ptr, (u64)node->cookie);
			} else {
				list_del_init(&w->entry);
				if (!weak && !strong) {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "%d:%d node %d u%016llx c%016llx deleted\n",
						     proc->pid, thread->pid,
						     node->debug_id, (u64)node->ptr, (u64)node->cookie);
					rb_erase(&node->rb_node, &proc->nodes);
					binder_stats_deleted(BINDER_STAT_NODE);
				} else {
					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
						     "%d:%d node %d u%016llx c%016llx state unchanged\n",
						     proc->pid, thread->pid,
						     node->debug_id, (u64)node->ptr, (u64)node->cookie);
				}
			}
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			if (put_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user_preempt_disabled(death->cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)death->cookie);

			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				list_del(&w->entry);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else
				list_move(&w->entry, &proc->delivered_death);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;

			tr.target.ptr = target_node->ptr;
			tr.cookie = target_node->cookie;
			t->saved_priority = task_nice(current);
			if (t->priority < target_node->min_priority &&
			    !(t->flags & TF_ONE_WAY))
				binder_set_nice(t->priority);
			else if (!(t->flags & TF_ONE_WAY) ||
				 t->saved_priority > target_node->min_priority)
				binder_set_nice(target_node->min_priority);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = 0;
			tr.cookie = 0;
			cmd = BR_REPLY;
		}
		tr.flags = t->flags;
		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		if (t->from) {
			struct task_struct *sender = t->from->proc->tsk;

			tr.sender_pid = task_tgid_nr_ns(sender,
							task_active_pid_ns(current));
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		tr.data.ptr.buffer = (binder_uintptr_t)(
					(uintptr_t)t->buffer->data +
					proc->user_buffer_offset);
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));

		if (put_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		if (copy_to_user_preempt_disabled(ptr, &tr, sizeof(tr)))
			return -EFAULT;
		ptr += sizeof(tr);

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     "BR_REPLY",
			     t->debug_id, t->from ? t->from->proc->pid : 0,
			     t->from ? t->from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);

		list_del(&t->work.entry);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
		} else {
			t->buffer->transaction = NULL;
			binder_stats_deleted(BINDER_STAT_TRANSACTION);
		}
		break;
	}

done:
	*consumed = ptr - buffer;
	if (proc->requested_threads + proc->ready_threads == 0 &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
	     /*spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user_preempt_disabled(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	}
	return 0;
}
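
/* Flush work items that can no longer be delivered to user space. */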
static void binder_release_work(struct list_head *list)
{
	struct binder_work *w;

	while (!list_empty(list)) {
		w = list_first_entry(list, struct binder_work, entry);
		list_del_init(&w->entry);
		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);
			if (t->buffer->target_node &&
			    !(t->flags & TF_ONE_WAY)) {
				binder_send_failed_reply(t, BR_DEAD_REPLY);
			} else {
				binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
					"undelivered transaction %d\n",
					t->debug_id);
				t->buffer->transaction = NULL;
				binder_stats_deleted(BINDER_STAT_TRANSACTION);
			}
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}
}
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			break;
	}
	if (*p == NULL) {
		thread = kzalloc_preempt_disabled(sizeof(*thread));
		if (thread == NULL)
			return NULL;
		binder_stats_created(BINDER_STAT_THREAD);
		thread->proc = proc;
		thread->pid = current->pid;
		init_waitqueue_head(&thread->wait);
		INIT_LIST_HEAD(&thread->todo);
		rb_link_node(&thread->rb_node, parent, p);
		rb_insert_color(&thread->rb_node, &proc->threads);
		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
		thread->return_error = BR_OK;
		thread->return_error2 = BR_OK;
	}
	return thread;
}
static int binder_free_thread(struct binder_proc *proc,
			      struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;

	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t && t->to_thread == thread)
		send_reply = t;
	while (t) {
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
	}
	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(&thread->todo);
	binder_stats_deleted(BINDER_STAT_THREAD);
	return active_transactions;
}
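
/* poll() support: report readable when thread or process work is pending. */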
static unsigned int binder_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	int wait_for_proc_work;

	binder_lock(__func__);

	thread = binder_get_thread(proc);

	wait_for_proc_work = thread->transaction_stack == NULL &&
		list_empty(&thread->todo) && thread->return_error == BR_OK;

	binder_unlock(__func__);

	if (wait_for_proc_work) {
		if (binder_has_proc_work(proc, thread))
			return POLLIN;
		poll_wait(filp, &proc->wait, wait);
		if (binder_has_proc_work(proc, thread))
			return POLLIN;
	} else {
		if (binder_has_thread_work(thread))
			return POLLIN;
		poll_wait(filp, &thread->wait, wait);
		if (binder_has_thread_work(thread))
			return POLLIN;
	}
	return 0;
}
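
/*
 * BINDER_WRITE_READ handler: copy in the binder_write_read block, run
 * the write side and then the read side, and copy the consumed counts
 * back to user space.
 */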
static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user_preempt_disabled(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		if (!list_empty(&proc->todo))
			wake_up_interruptible(&proc->wait);
		if (ret < 0) {
			if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;

	kuid_t curr_euid = current_euid();

	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
	if (!context->binder_context_mgr_node) {
		ret = -ENOMEM;
		goto out;
	}
	context->binder_context_mgr_node->local_weak_refs++;
	context->binder_context_mgr_node->local_strong_refs++;
	context->binder_context_mgr_node->has_strong_ref = 1;
	context->binder_context_mgr_node->has_weak_ref = 1;
out:
	return ret;
}
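
/* Top-level ioctl dispatcher for the binder character device. */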
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	binder_lock(__func__);
	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS:
		if (copy_from_user_preempt_disabled(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		break;
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_free_thread(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user_preempt_disabled(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
	binder_unlock(__func__);
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}
static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	proc->vma_vm_mm = NULL;
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}

static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};
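
/*
 * Set up the per-process binder buffer area: one contiguous kernel
 * vm area mirrored into the caller's mapping, backed by pages that
 * are allocated on demand.
 */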
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;
	struct binder_buffer *buffer;

	if (proc->tsk != current)
		return -EINVAL;

	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;

	mutex_lock(&binder_mmap_lock);
	if (proc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	proc->buffer = area->addr;
	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
	mutex_unlock(&binder_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
			pr_info("binder_mmap: %d %lx-%lx maps %pK bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
	if (proc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	proc->buffer_size = vma->vm_end - vma->vm_start;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	/* binder_update_page_range assumes preemption is disabled */
	preempt_disable();
	ret = binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma);
	preempt_enable_no_resched();
	if (ret) {
		ret = -ENOMEM;
		failure_string = "alloc small buf";
		goto err_alloc_small_buf_failed;
	}
	buffer = proc->buffer;
	INIT_LIST_HEAD(&proc->buffers);
	list_add(&buffer->entry, &proc->buffers);
	binder_insert_free_buffer(proc, buffer);
	proc->free_async_space = proc->buffer_size / 2;
	proc->files = get_files_struct(current);
	proc->vma_vm_mm = vma->vm_mm;

	/*pr_info("binder_mmap: %d %lx-%lx maps %pK\n",
		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
	return 0;

err_alloc_small_buf_failed:
	kfree(proc->pages);
	proc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_mmap_lock);
	vfree(proc->buffer);
	proc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_mmap_lock);
err_bad_arg:
	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
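
/* Create the binder_proc for a process opening a binder device node. */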
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;
	struct binder_device *binder_dev;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	get_task_struct(current);
	proc->tsk = current;
	INIT_LIST_HEAD(&proc->todo);
	init_waitqueue_head(&proc->wait);
	proc->default_priority = task_nice(current);
	binder_dev = container_of(filp->private_data, struct binder_device,
				  miscdev);
	proc->context = &binder_dev->context;

	binder_lock(__func__);

	binder_stats_created(BINDER_STAT_PROC);
	hlist_add_head(&proc->proc_node, &binder_procs);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	filp->private_data = proc;

	binder_unlock(__func__);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * anyway print all contexts that a given PID has, so this
		 * is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&binder_proc_fops);
	}

	return 0;
}
static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}
static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	wake_up_interruptible_all(&proc->wait);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}
static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;

	list_del_init(&node->work.entry);
	binder_release_work(&node->async_todo);

	if (hlist_empty(&node->refs)) {
		binder_stats_deleted(BINDER_STAT_NODE);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	hlist_add_head(&node->dead_node, &binder_dead_nodes);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;

		if (!ref->death)
			continue;

		death++;

		if (list_empty(&ref->death->work.entry)) {
			ref->death->work.type = BINDER_WORK_DEAD_BINDER;
			list_add_tail(&ref->death->work.entry,
				      &ref->proc->todo);
			wake_up_interruptible(&ref->proc->wait);
		} else
			BUG();
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);

	return refs;
}
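
/*
 * Tear down a binder_proc after its file has been released: free
 * threads, nodes, refs and buffers, then drop the remaining pages.
 */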
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_transaction *t;
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, buffers,
		active_transactions, page_count;

	BUG_ON(proc->files);

	hlist_del(&proc->proc_node);

	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}

	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		threads++;
		active_transactions += binder_free_thread(proc, thread);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		rb_erase(&node->rb_node, &proc->nodes);
		incoming_refs = binder_node_release(node, incoming_refs);
	}

	outgoing_refs = 0;
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_delete_ref(ref);
	}

	binder_release_work(&proc->todo);
	binder_release_work(&proc->delivered_death);

	buffers = 0;
	while ((n = rb_first(&proc->allocated_buffers))) {
		struct binder_buffer *buffer;

		buffer = rb_entry(n, struct binder_buffer, rb_node);

		t = buffer->transaction;
		if (t) {
			t->buffer = NULL;
			buffer->transaction = NULL;
			pr_err("release proc %d, transaction %d, not freed\n",
			       proc->pid, t->debug_id);
		}

		binder_free_buf(proc, buffer);
		buffers++;
	}

	binder_stats_deleted(BINDER_STAT_PROC);

	page_count = 0;
	if (proc->pages) {
		int i;

		for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
			void *page_addr;

			if (!proc->pages[i])
				continue;

			page_addr = proc->buffer + i * PAGE_SIZE;
			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%s: %d: page %d at %pK not freed\n",
				     __func__, proc->pid, i, page_addr);
			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
			__free_page(proc->pages[i]);
			page_count++;
		}
		kfree(proc->pages);
	}

	vfree(proc->buffer);

	put_task_struct(proc->tsk);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions, buffers, page_count);

	kfree(proc);
}
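
/* Work function that runs deferred flush/release requests queued by binder_defer_work(). */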
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;

	do {
		trace_binder_lock(__func__);
		mutex_lock(&binder_main_lock);
		trace_binder_locked(__func__);

		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			files = proc->files;
			if (files)
				proc->files = NULL;
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		trace_binder_unlock(__func__);
		mutex_unlock(&binder_main_lock);
		preempt_enable_no_resched();
		if (files)
			put_files_struct(files);
	} while (proc);
}

static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
			       &binder_deferred_list);
		queue_work(binder_deferred_workqueue, &binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}
static void print_binder_transaction(struct seq_file *m, const char *prefix,
				     struct binder_transaction *t)
{
	seq_printf(m,
		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   t->to_proc ? t->to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	if (t->buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (t->buffer->target_node)
		seq_printf(m, " node %d",
			   t->buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %pK\n",
		   t->buffer->data_size, t->buffer->offsets_size,
		   t->buffer->data);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->transaction ? "active" : "delivered");
}

static void print_binder_work(struct seq_file *m, const char *prefix,
			      const char *transaction_prefix,
			      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction(m, transaction_prefix, t);
		break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}

static void print_binder_thread(struct seq_file *m,
				struct binder_thread *thread,
				int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction(m,
						 " outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction(m,
						 " incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction(m, " bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work(m, " ", " pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

static void print_binder_node(struct seq_file *m, struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	list_for_each_entry(w, &node->async_todo, entry)
		print_binder_work(m, " ",
				  " pending async transaction", w);
}

static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
{
	seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->strong, ref->weak, ref->death);
}
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (print_all || node->has_async_transaction)
			print_binder_node(m, node);
	}
	if (print_all) {
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref(m, rb_entry(n, struct binder_ref,
						     rb_node_desc));
	}
	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, " buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work(m, " ", " pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, " has delivered dead binder\n");
		break;
	}
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}
static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		if (stats->bc[i])
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], stats->bc[i]);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		if (stats->br[i])
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], stats->br[i]);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		if (stats->obj_created[i] || stats->obj_deleted[i])
			seq_printf(m, "%s%s: active %d total %d\n", prefix,
				   binder_objstat_strings[i],
				   stats->obj_created[i] - stats->obj_deleted[i],
				   stats->obj_created[i]);
	}
}
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct rb_node *n;
	int count, strong, weak;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, " threads: %d\n", count);
	seq_printf(m, " requested threads: %d+%d/%d\n"
			" ready threads %d\n"
			" free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			proc->ready_threads, proc->free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, " nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->strong;
		weak += ref->weak;
	}
	seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);

	count = 0;
	for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, " buffers: %d\n", count);

	count = 0;
	list_for_each_entry(w, &proc->todo, entry) {
		switch (w->type) {
		case BINDER_WORK_TRANSACTION:
			count++;
			break;
		default:
			break;
		}
	}
	seq_printf(m, " pending transactions: %d\n", count);

	print_binder_stats(m, " ", &proc->stats);
}
static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	seq_puts(m, "binder state:\n");

	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
		print_binder_node(m, node);

	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}

static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}

static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	seq_puts(m, "binder transactions:\n");
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}

static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;
	int do_lock = !binder_debug_no_lock;

	if (do_lock)
		binder_lock(__func__);

	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	if (do_lock)
		binder_unlock(__func__);
	return 0;
}
static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size);
}

static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	int i;

	for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
		print_binder_transaction_log_entry(m, &log->entry[i]);

	for (i = 0; i < log->next; i++)
		print_binder_transaction_log_entry(m, &log->entry[i]);
	return 0;
}
static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}
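
/* Module init: create the workqueue, debugfs entries and one misc device per configured binder device name. */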
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_names;
	struct binder_device *device;
	struct hlist_node *tmp;

	binder_deferred_workqueue = create_singlethread_workqueue("binder");
	if (!binder_deferred_workqueue)
		return -ENOMEM;

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	/*
	 * Copy the module_parameter string, because we don't want to
	 * tokenize it in-place.
	 */
	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}
	strcpy(device_names, binder_devices_param);

	while ((device_name = strsep(&device_names, ","))) {
		ret = init_binder_device(device_name);
		if (ret)
			goto err_init_binder_device_failed;
	}

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}
err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	destroy_workqueue(binder_deferred_workqueue);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");