3 * Android IPC Subsystem
5 * Copyright (C) 2007-2008 Google, Inc.
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
21 * There are 3 main spinlocks which must be acquired in the
24 * 1) proc->outer_lock : protects binder_ref
25 * binder_proc_lock() and binder_proc_unlock() are
27 * 2) node->lock : protects most fields of binder_node.
28 * binder_node_lock() and binder_node_unlock() are
30 * 3) proc->inner_lock : protects the thread and node lists
31 * (proc->threads, proc->nodes) and all todo lists associated
32 * with the binder_proc (proc->todo, thread->todo,
33 * proc->delivered_death and node->async_todo).
34 * binder_inner_proc_lock() and binder_inner_proc_unlock()
37 * Any lock under procA must never be nested under any lock at the same
38 * level or below on procB.
40 * Functions that require a lock held on entry indicate which lock
41 * in the suffix of the function name:
43 * foo_olocked() : requires node->outer_lock
44 * foo_nlocked() : requires node->lock
45 * foo_ilocked() : requires proc->inner_lock
46 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
47 * foo_nilocked(): requires node->lock and proc->inner_lock
51 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
53 #include <asm/cacheflush.h>
54 #include <linux/fdtable.h>
55 #include <linux/file.h>
56 #include <linux/freezer.h>
58 #include <linux/list.h>
59 #include <linux/miscdevice.h>
60 #include <linux/module.h>
61 #include <linux/mutex.h>
62 #include <linux/nsproxy.h>
63 #include <linux/poll.h>
64 #include <linux/debugfs.h>
65 #include <linux/rbtree.h>
66 #include <linux/sched.h>
67 #include <linux/seq_file.h>
68 #include <linux/uaccess.h>
69 #include <linux/pid_namespace.h>
70 #include <linux/security.h>
71 #include <linux/spinlock.h>
73 #ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
74 #define BINDER_IPC_32BIT 1
77 #include <uapi/linux/android/binder.h>
78 #include "binder_alloc.h"
79 #include "binder_trace.h"
81 static DEFINE_MUTEX(binder_main_lock
);
83 static HLIST_HEAD(binder_deferred_list
);
84 static DEFINE_MUTEX(binder_deferred_lock
);
86 static HLIST_HEAD(binder_devices
);
87 static HLIST_HEAD(binder_procs
);
88 static DEFINE_MUTEX(binder_procs_lock
);
90 static HLIST_HEAD(binder_dead_nodes
);
91 static DEFINE_SPINLOCK(binder_dead_nodes_lock
);
93 static struct dentry
*binder_debugfs_dir_entry_root
;
94 static struct dentry
*binder_debugfs_dir_entry_proc
;
95 static atomic_t binder_last_id
;
96 static struct workqueue_struct
*binder_deferred_workqueue
;
98 #define BINDER_DEBUG_ENTRY(name) \
99 static int binder_##name##_open(struct inode *inode, struct file *file) \
101 return single_open(file, binder_##name##_show, inode->i_private); \
104 static const struct file_operations binder_##name##_fops = { \
105 .owner = THIS_MODULE, \
106 .open = binder_##name##_open, \
108 .llseek = seq_lseek, \
109 .release = single_release, \
112 static int binder_proc_show(struct seq_file
*m
, void *unused
);
113 BINDER_DEBUG_ENTRY(proc
);
115 /* This is only defined in include/asm-arm/sizes.h */
121 #define SZ_4M 0x400000
124 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
126 #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
129 BINDER_DEBUG_USER_ERROR
= 1U << 0,
130 BINDER_DEBUG_FAILED_TRANSACTION
= 1U << 1,
131 BINDER_DEBUG_DEAD_TRANSACTION
= 1U << 2,
132 BINDER_DEBUG_OPEN_CLOSE
= 1U << 3,
133 BINDER_DEBUG_DEAD_BINDER
= 1U << 4,
134 BINDER_DEBUG_DEATH_NOTIFICATION
= 1U << 5,
135 BINDER_DEBUG_READ_WRITE
= 1U << 6,
136 BINDER_DEBUG_USER_REFS
= 1U << 7,
137 BINDER_DEBUG_THREADS
= 1U << 8,
138 BINDER_DEBUG_TRANSACTION
= 1U << 9,
139 BINDER_DEBUG_TRANSACTION_COMPLETE
= 1U << 10,
140 BINDER_DEBUG_FREE_BUFFER
= 1U << 11,
141 BINDER_DEBUG_INTERNAL_REFS
= 1U << 12,
142 BINDER_DEBUG_PRIORITY_CAP
= 1U << 13,
143 BINDER_DEBUG_SPINLOCKS
= 1U << 14,
145 static uint32_t binder_debug_mask
= BINDER_DEBUG_USER_ERROR
|
146 BINDER_DEBUG_FAILED_TRANSACTION
| BINDER_DEBUG_DEAD_TRANSACTION
;
147 module_param_named(debug_mask
, binder_debug_mask
, uint
, S_IWUSR
| S_IRUGO
);
149 static char *binder_devices_param
= CONFIG_ANDROID_BINDER_DEVICES
;
150 module_param_named(devices
, binder_devices_param
, charp
, S_IRUGO
);
152 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait
);
153 static int binder_stop_on_user_error
;
155 static int binder_set_stop_on_user_error(const char *val
,
156 struct kernel_param
*kp
)
160 ret
= param_set_int(val
, kp
);
161 if (binder_stop_on_user_error
< 2)
162 wake_up(&binder_user_error_wait
);
165 module_param_call(stop_on_user_error
, binder_set_stop_on_user_error
,
166 param_get_int
, &binder_stop_on_user_error
, S_IWUSR
| S_IRUGO
);
/* Print iff the corresponding bit is set in binder_debug_mask. */
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

/*
 * Report a userspace-caused error; when stop_on_user_error is enabled,
 * latch state 2 so debugging tools can catch the offending process.
 */
#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
182 #define to_flat_binder_object(hdr) \
183 container_of(hdr, struct flat_binder_object, hdr)
185 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
187 #define to_binder_buffer_object(hdr) \
188 container_of(hdr, struct binder_buffer_object, hdr)
190 #define to_binder_fd_array_object(hdr) \
191 container_of(hdr, struct binder_fd_array_object, hdr)
193 enum binder_stat_types
{
199 BINDER_STAT_TRANSACTION
,
200 BINDER_STAT_TRANSACTION_COMPLETE
,
204 struct binder_stats
{
205 atomic_t br
[_IOC_NR(BR_FAILED_REPLY
) + 1];
206 atomic_t bc
[_IOC_NR(BC_REPLY_SG
) + 1];
207 atomic_t obj_created
[BINDER_STAT_COUNT
];
208 atomic_t obj_deleted
[BINDER_STAT_COUNT
];
211 static struct binder_stats binder_stats
;
213 static inline void binder_stats_deleted(enum binder_stat_types type
)
215 atomic_inc(&binder_stats
.obj_deleted
[type
]);
218 static inline void binder_stats_created(enum binder_stat_types type
)
220 atomic_inc(&binder_stats
.obj_created
[type
]);
223 struct binder_transaction_log_entry
{
235 int return_error_line
;
236 uint32_t return_error
;
237 uint32_t return_error_param
;
238 const char *context_name
;
240 struct binder_transaction_log
{
243 struct binder_transaction_log_entry entry
[32];
245 static struct binder_transaction_log binder_transaction_log
;
246 static struct binder_transaction_log binder_transaction_log_failed
;
248 static struct binder_transaction_log_entry
*binder_transaction_log_add(
249 struct binder_transaction_log
*log
)
251 struct binder_transaction_log_entry
*e
;
252 unsigned int cur
= atomic_inc_return(&log
->cur
);
254 if (cur
>= ARRAY_SIZE(log
->entry
))
256 e
= &log
->entry
[cur
% ARRAY_SIZE(log
->entry
)];
257 WRITE_ONCE(e
->debug_id_done
, 0);
259 * write-barrier to synchronize access to e->debug_id_done.
260 * We make sure the initialized 0 value is seen before
261 * memset() other fields are zeroed by memset.
264 memset(e
, 0, sizeof(*e
));
268 struct binder_context
{
269 struct binder_node
*binder_context_mgr_node
;
270 struct mutex context_mgr_node_lock
;
272 kuid_t binder_context_mgr_uid
;
276 struct binder_device
{
277 struct hlist_node hlist
;
278 struct miscdevice miscdev
;
279 struct binder_context context
;
283 * struct binder_work - work enqueued on a worklist
284 * @entry: node enqueued on list
285 * @type: type of work to be performed
287 * There are separate work lists for proc, thread, and node (async).
290 struct list_head entry
;
293 BINDER_WORK_TRANSACTION
= 1,
294 BINDER_WORK_TRANSACTION_COMPLETE
,
295 BINDER_WORK_RETURN_ERROR
,
297 BINDER_WORK_DEAD_BINDER
,
298 BINDER_WORK_DEAD_BINDER_AND_CLEAR
,
299 BINDER_WORK_CLEAR_DEATH_NOTIFICATION
,
303 struct binder_error
{
304 struct binder_work work
;
309 * struct binder_node - binder node bookkeeping
310 * @debug_id: unique ID for debugging
311 * (invariant after initialized)
312 * @lock: lock for node fields
313 * @work: worklist element for node work
314 * (protected by @proc->inner_lock)
315 * @rb_node: element for proc->nodes tree
316 * @dead_node: element for binder_dead_nodes list
317 * (protected by binder_dead_nodes_lock)
318 * @proc: binder_proc that owns this node
319 * (invariant after initialized)
320 * @refs: list of references on this node
321 * @internal_strong_refs: used to take strong references when
322 * initiating a transaction
323 * (protected by @proc->inner_lock if @proc
325 * @local_weak_refs: weak user refs from local process
326 * (protected by @proc->inner_lock if @proc
328 * @local_strong_refs: strong user refs from local process
329 * (protected by @proc->inner_lock if @proc
331 * @tmp_refs: temporary kernel refs
332 * (protected by @proc->inner_lock while @proc
333 * is valid, and by binder_dead_nodes_lock
334 * if @proc is NULL. During inc/dec and node release
335 * it is also protected by @lock to provide safety
336 * as the node dies and @proc becomes NULL)
337 * @ptr: userspace pointer for node
338 * (invariant, no lock needed)
339 * @cookie: userspace cookie for node
340 * (invariant, no lock needed)
341 * @has_strong_ref: userspace notified of strong ref
342 * (protected by @proc->inner_lock if @proc
344 * @pending_strong_ref: userspace has acked notification of strong ref
345 * (protected by @proc->inner_lock if @proc
347 * @has_weak_ref: userspace notified of weak ref
348 * (protected by @proc->inner_lock if @proc
350 * @pending_weak_ref: userspace has acked notification of weak ref
351 * (protected by @proc->inner_lock if @proc
353 * @has_async_transaction: async transaction to node in progress
354 * @accept_fds: file descriptor operations supported for node
355 * (invariant after initialized)
356 * @min_priority: minimum scheduling priority
357 * (invariant after initialized)
358 * @async_todo: list of async work items
359 * (protected by @proc->inner_lock)
361 * Bookkeeping structure for binder nodes.
366 struct binder_work work
;
368 struct rb_node rb_node
;
369 struct hlist_node dead_node
;
371 struct binder_proc
*proc
;
372 struct hlist_head refs
;
373 int internal_strong_refs
;
375 int local_strong_refs
;
377 binder_uintptr_t ptr
;
378 binder_uintptr_t cookie
;
381 * bitfield elements protected by
385 u8 pending_strong_ref
:1;
387 u8 pending_weak_ref
:1;
391 * invariant after initialization
396 bool has_async_transaction
;
397 struct list_head async_todo
;
400 struct binder_ref_death
{
402 * @work: worklist element for death notifications
403 * (protected by inner_lock of the proc that
404 * this ref belongs to)
406 struct binder_work work
;
407 binder_uintptr_t cookie
;
411 * struct binder_ref_data - binder_ref counts and id
412 * @debug_id: unique ID for the ref
413 * @desc: unique userspace handle for ref
414 * @strong: strong ref count (debugging only if not locked)
415 * @weak: weak ref count (debugging only if not locked)
417 * Structure to hold ref count and ref id information. Since
418 * the actual ref can only be accessed with a lock, this structure
419 * is used to return information about the ref to callers of
420 * ref inc/dec functions.
422 struct binder_ref_data
{
430 * struct binder_ref - struct to track references on nodes
431 * @data: binder_ref_data containing id, handle, and current refcounts
432 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
433 * @rb_node_node: node for lookup by @node in proc's rb_tree
434 * @node_entry: list entry for node->refs list in target node
435 * @proc: binder_proc containing ref
436 * @node: binder_node of target node. When cleaning up a
437 * ref for deletion in binder_cleanup_ref, a non-NULL
438 * @node indicates the node must be freed
439 * @death: pointer to death notification (ref_death) if requested
441 * Structure to track references from procA to target node (on procB). This
442 * structure is unsafe to access without holding @proc->outer_lock.
445 /* Lookups needed: */
446 /* node + proc => ref (transaction) */
447 /* desc + proc => ref (transaction, inc/dec ref) */
448 /* node => refs + procs (proc exit) */
449 struct binder_ref_data data
;
450 struct rb_node rb_node_desc
;
451 struct rb_node rb_node_node
;
452 struct hlist_node node_entry
;
453 struct binder_proc
*proc
;
454 struct binder_node
*node
;
455 struct binder_ref_death
*death
;
458 enum binder_deferred_state
{
459 BINDER_DEFERRED_PUT_FILES
= 0x01,
460 BINDER_DEFERRED_FLUSH
= 0x02,
461 BINDER_DEFERRED_RELEASE
= 0x04,
465 * struct binder_proc - binder process bookkeeping
466 * @proc_node: element for binder_procs list
467 * @threads: rbtree of binder_threads in this proc
468 * @nodes: rbtree of binder nodes associated with
469 * this proc ordered by node->ptr
470 * @refs_by_desc: rbtree of refs ordered by ref->desc
471 * @refs_by_node: rbtree of refs ordered by ref->node
472 * @pid PID of group_leader of process
473 * (invariant after initialized)
474 * @tsk task_struct for group_leader of process
475 * (invariant after initialized)
476 * @files files_struct for process
477 * (invariant after initialized)
478 * @deferred_work_node: element for binder_deferred_list
479 * (protected by binder_deferred_lock)
480 * @deferred_work: bitmap of deferred work to perform
481 * (protected by binder_deferred_lock)
482 * @is_dead: process is dead and awaiting free
483 * when outstanding transactions are cleaned up
484 * @todo: list of work for this process
485 * (protected by @inner_lock)
486 * @wait: wait queue head to wait for proc work
487 * (invariant after initialized)
488 * @stats: per-process binder statistics
489 * (atomics, no lock needed)
490 * @delivered_death: list of delivered death notification
491 * (protected by @inner_lock)
492 * @max_threads: cap on number of binder threads
493 * @requested_threads: number of binder threads requested but not
494 * yet started. In current implementation, can
496 * @requested_threads_started: number binder threads started
497 * @ready_threads: number of threads waiting for proc work
498 * @tmp_ref: temporary reference to indicate proc is in use
499 * @default_priority: default scheduler priority
500 * (invariant after initialized)
501 * @debugfs_entry: debugfs node
502 * @alloc: binder allocator bookkeeping
503 * @context: binder_context for this proc
504 * (invariant after initialized)
505 * @inner_lock: can nest under outer_lock and/or node lock
506 * @outer_lock: no nesting under inner or node lock
507 * Lock order: 1) outer, 2) node, 3) inner
509 * Bookkeeping structure for binder processes
512 struct hlist_node proc_node
;
513 struct rb_root threads
;
514 struct rb_root nodes
;
515 struct rb_root refs_by_desc
;
516 struct rb_root refs_by_node
;
518 struct task_struct
*tsk
;
519 struct files_struct
*files
;
520 struct hlist_node deferred_work_node
;
524 struct list_head todo
;
525 wait_queue_head_t wait
;
526 struct binder_stats stats
;
527 struct list_head delivered_death
;
529 int requested_threads
;
530 int requested_threads_started
;
533 long default_priority
;
534 struct dentry
*debugfs_entry
;
535 struct binder_alloc alloc
;
536 struct binder_context
*context
;
537 spinlock_t inner_lock
;
538 spinlock_t outer_lock
;
542 BINDER_LOOPER_STATE_REGISTERED
= 0x01,
543 BINDER_LOOPER_STATE_ENTERED
= 0x02,
544 BINDER_LOOPER_STATE_EXITED
= 0x04,
545 BINDER_LOOPER_STATE_INVALID
= 0x08,
546 BINDER_LOOPER_STATE_WAITING
= 0x10,
550 * struct binder_thread - binder thread bookkeeping
551 * @proc: binder process for this thread
552 * (invariant after initialization)
553 * @rb_node: element for proc->threads rbtree
554 * @pid: PID for this thread
555 * (invariant after initialization)
556 * @looper: bitmap of looping state
557 * (only accessed by this thread)
558 * @looper_need_return: looping thread needs to exit driver
560 * @transaction_stack: stack of in-progress transactions for this thread
561 * @todo: list of work to do for this thread
562 * (protected by @proc->inner_lock)
563 * @return_error: transaction errors reported by this thread
564 * (only accessed by this thread)
565 * @reply_error: transaction errors reported by target thread
566 * @wait: wait queue for thread work
567 * @stats: per-thread statistics
568 * (atomics, no lock needed)
569 * @tmp_ref: temporary reference to indicate thread is in use
570 * (atomic since @proc->inner_lock cannot
571 * always be acquired)
572 * @is_dead: thread is dead and awaiting free
573 * when outstanding transactions are cleaned up
575 * Bookkeeping structure for binder threads.
577 struct binder_thread
{
578 struct binder_proc
*proc
;
579 struct rb_node rb_node
;
581 int looper
; /* only modified by this thread */
582 bool looper_need_return
; /* can be written by other thread */
583 struct binder_transaction
*transaction_stack
;
584 struct list_head todo
;
585 struct binder_error return_error
;
586 struct binder_error reply_error
;
587 wait_queue_head_t wait
;
588 struct binder_stats stats
;
593 struct binder_transaction
{
595 struct binder_work work
;
596 struct binder_thread
*from
;
597 struct binder_transaction
*from_parent
;
598 struct binder_proc
*to_proc
;
599 struct binder_thread
*to_thread
;
600 struct binder_transaction
*to_parent
;
601 unsigned need_reply
:1;
602 /* unsigned is_dead:1; */ /* not used at the moment */
604 struct binder_buffer
*buffer
;
611 * @lock: protects @from, @to_proc, and @to_thread
613 * @from, @to_proc, and @to_thread can be set to NULL
614 * during thread teardown
620 * binder_proc_lock() - Acquire outer lock for given binder_proc
621 * @proc: struct binder_proc to acquire
623 * Acquires proc->outer_lock. Used to protect binder_ref
624 * structures associated with the given proc.
626 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
628 _binder_proc_lock(struct binder_proc
*proc
, int line
)
630 binder_debug(BINDER_DEBUG_SPINLOCKS
,
631 "%s: line=%d\n", __func__
, line
);
632 spin_lock(&proc
->outer_lock
);
636 * binder_proc_unlock() - Release spinlock for given binder_proc
637 * @proc: struct binder_proc to acquire
639 * Release lock acquired via binder_proc_lock()
641 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
643 _binder_proc_unlock(struct binder_proc
*proc
, int line
)
645 binder_debug(BINDER_DEBUG_SPINLOCKS
,
646 "%s: line=%d\n", __func__
, line
);
647 spin_unlock(&proc
->outer_lock
);
651 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
652 * @proc: struct binder_proc to acquire
654 * Acquires proc->inner_lock. Used to protect todo lists
656 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
658 _binder_inner_proc_lock(struct binder_proc
*proc
, int line
)
660 binder_debug(BINDER_DEBUG_SPINLOCKS
,
661 "%s: line=%d\n", __func__
, line
);
662 spin_lock(&proc
->inner_lock
);
666 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
667 * @proc: struct binder_proc to acquire
669 * Release lock acquired via binder_inner_proc_lock()
671 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
673 _binder_inner_proc_unlock(struct binder_proc
*proc
, int line
)
675 binder_debug(BINDER_DEBUG_SPINLOCKS
,
676 "%s: line=%d\n", __func__
, line
);
677 spin_unlock(&proc
->inner_lock
);
681 * binder_node_lock() - Acquire spinlock for given binder_node
682 * @node: struct binder_node to acquire
684 * Acquires node->lock. Used to protect binder_node fields
686 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
688 _binder_node_lock(struct binder_node
*node
, int line
)
690 binder_debug(BINDER_DEBUG_SPINLOCKS
,
691 "%s: line=%d\n", __func__
, line
);
692 spin_lock(&node
->lock
);
696 * binder_node_unlock() - Release spinlock for given binder_proc
697 * @node: struct binder_node to acquire
699 * Release lock acquired via binder_node_lock()
701 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
703 _binder_node_unlock(struct binder_node
*node
, int line
)
705 binder_debug(BINDER_DEBUG_SPINLOCKS
,
706 "%s: line=%d\n", __func__
, line
);
707 spin_unlock(&node
->lock
);
710 static bool binder_worklist_empty_ilocked(struct list_head
*list
)
712 return list_empty(list
);
716 * binder_worklist_empty() - Check if no items on the work list
717 * @proc: binder_proc associated with list
718 * @list: list to check
720 * Return: true if there are no items on list, else false
722 static bool binder_worklist_empty(struct binder_proc
*proc
,
723 struct list_head
*list
)
727 binder_inner_proc_lock(proc
);
728 ret
= binder_worklist_empty_ilocked(list
);
729 binder_inner_proc_unlock(proc
);
734 binder_enqueue_work_ilocked(struct binder_work
*work
,
735 struct list_head
*target_list
)
737 BUG_ON(target_list
== NULL
);
738 BUG_ON(work
->entry
.next
&& !list_empty(&work
->entry
));
739 list_add_tail(&work
->entry
, target_list
);
/**
 * binder_enqueue_work() - Add an item to the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 */
static void
binder_enqueue_work(struct binder_proc *proc,
		    struct binder_work *work,
		    struct list_head *target_list)
{
	binder_inner_proc_lock(proc);
	binder_enqueue_work_ilocked(work, target_list);
	binder_inner_proc_unlock(proc);
}
762 binder_dequeue_work_ilocked(struct binder_work
*work
)
764 list_del_init(&work
->entry
);
/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}
783 static struct binder_work
*binder_dequeue_work_head_ilocked(
784 struct list_head
*list
)
786 struct binder_work
*w
;
788 w
= list_first_entry_or_null(list
, struct binder_work
, entry
);
790 list_del_init(&w
->entry
);
/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}
/* Forward declarations for helpers defined later in this file. */
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref(struct binder_node *node);
821 static int task_get_unused_fd_flags(struct binder_proc
*proc
, int flags
)
823 struct files_struct
*files
= proc
->files
;
824 unsigned long rlim_cur
;
830 if (!lock_task_sighand(proc
->tsk
, &irqs
))
833 rlim_cur
= task_rlimit(proc
->tsk
, RLIMIT_NOFILE
);
834 unlock_task_sighand(proc
->tsk
, &irqs
);
836 return __alloc_fd(files
, 0, rlim_cur
, flags
);
840 * copied from fd_install
842 static void task_fd_install(
843 struct binder_proc
*proc
, unsigned int fd
, struct file
*file
)
846 __fd_install(proc
->files
, fd
, file
);
850 * copied from sys_close
852 static long task_close_fd(struct binder_proc
*proc
, unsigned int fd
)
856 if (proc
->files
== NULL
)
859 retval
= __close_fd(proc
->files
, fd
);
860 /* can't restart close syscall because file table entry was cleared */
861 if (unlikely(retval
== -ERESTARTSYS
||
862 retval
== -ERESTARTNOINTR
||
863 retval
== -ERESTARTNOHAND
||
864 retval
== -ERESTART_RESTARTBLOCK
))
870 static inline void binder_lock(const char *tag
)
872 trace_binder_lock(tag
);
873 mutex_lock(&binder_main_lock
);
874 trace_binder_locked(tag
);
877 static inline void binder_unlock(const char *tag
)
879 trace_binder_unlock(tag
);
880 mutex_unlock(&binder_main_lock
);
883 static void binder_set_nice(long nice
)
887 if (can_nice(current
, nice
)) {
888 set_user_nice(current
, nice
);
891 min_nice
= rlimit_to_nice(current
->signal
->rlim
[RLIMIT_NICE
].rlim_cur
);
892 binder_debug(BINDER_DEBUG_PRIORITY_CAP
,
893 "%d: nice value %ld not allowed use %ld instead\n",
894 current
->pid
, nice
, min_nice
);
895 set_user_nice(current
, min_nice
);
896 if (min_nice
<= MAX_NICE
)
898 binder_user_error("%d RLIMIT_NICE not set\n", current
->pid
);
901 static struct binder_node
*binder_get_node(struct binder_proc
*proc
,
902 binder_uintptr_t ptr
)
904 struct rb_node
*n
= proc
->nodes
.rb_node
;
905 struct binder_node
*node
;
908 node
= rb_entry(n
, struct binder_node
, rb_node
);
912 else if (ptr
> node
->ptr
)
916 * take an implicit weak reference
917 * to ensure node stays alive until
918 * call to binder_put_node()
920 binder_inc_node_tmpref(node
);
927 static struct binder_node
*binder_new_node(struct binder_proc
*proc
,
928 binder_uintptr_t ptr
,
929 binder_uintptr_t cookie
)
931 struct rb_node
**p
= &proc
->nodes
.rb_node
;
932 struct rb_node
*parent
= NULL
;
933 struct binder_node
*node
;
937 node
= rb_entry(parent
, struct binder_node
, rb_node
);
941 else if (ptr
> node
->ptr
)
947 node
= kzalloc(sizeof(*node
), GFP_KERNEL
);
950 binder_stats_created(BINDER_STAT_NODE
);
952 rb_link_node(&node
->rb_node
, parent
, p
);
953 rb_insert_color(&node
->rb_node
, &proc
->nodes
);
954 node
->debug_id
= atomic_inc_return(&binder_last_id
);
957 node
->cookie
= cookie
;
958 node
->work
.type
= BINDER_WORK_NODE
;
959 spin_lock_init(&node
->lock
);
960 INIT_LIST_HEAD(&node
->work
.entry
);
961 INIT_LIST_HEAD(&node
->async_todo
);
962 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
963 "%d:%d node %d u%016llx c%016llx created\n",
964 proc
->pid
, current
->pid
, node
->debug_id
,
965 (u64
)node
->ptr
, (u64
)node
->cookie
);
969 static void binder_free_node(struct binder_node
*node
)
972 binder_stats_deleted(BINDER_STAT_NODE
);
975 static int binder_inc_node_ilocked(struct binder_node
*node
, int strong
,
977 struct list_head
*target_list
)
980 BUG_ON(!spin_is_locked(&node
->proc
->inner_lock
));
983 if (target_list
== NULL
&&
984 node
->internal_strong_refs
== 0 &&
986 node
== node
->proc
->context
->
987 binder_context_mgr_node
&&
988 node
->has_strong_ref
)) {
989 pr_err("invalid inc strong node for %d\n",
993 node
->internal_strong_refs
++;
995 node
->local_strong_refs
++;
996 if (!node
->has_strong_ref
&& target_list
) {
997 binder_dequeue_work_ilocked(&node
->work
);
998 binder_enqueue_work_ilocked(&node
->work
, target_list
);
1002 node
->local_weak_refs
++;
1003 if (!node
->has_weak_ref
&& list_empty(&node
->work
.entry
)) {
1004 if (target_list
== NULL
) {
1005 pr_err("invalid inc weak node for %d\n",
1009 binder_enqueue_work_ilocked(&node
->work
, target_list
);
1015 static int binder_inc_node(struct binder_node
*node
, int strong
, int internal
,
1016 struct list_head
*target_list
)
1021 binder_inner_proc_lock(node
->proc
);
1022 ret
= binder_inc_node_ilocked(node
, strong
, internal
, target_list
);
1024 binder_inner_proc_unlock(node
->proc
);
1029 static bool binder_dec_node_ilocked(struct binder_node
*node
,
1030 int strong
, int internal
)
1032 struct binder_proc
*proc
= node
->proc
;
1035 BUG_ON(!spin_is_locked(&proc
->inner_lock
));
1038 node
->internal_strong_refs
--;
1040 node
->local_strong_refs
--;
1041 if (node
->local_strong_refs
|| node
->internal_strong_refs
)
1045 node
->local_weak_refs
--;
1046 if (node
->local_weak_refs
|| node
->tmp_refs
||
1047 !hlist_empty(&node
->refs
))
1051 if (proc
&& (node
->has_strong_ref
|| node
->has_weak_ref
)) {
1052 if (list_empty(&node
->work
.entry
)) {
1053 binder_enqueue_work_ilocked(&node
->work
, &proc
->todo
);
1054 wake_up_interruptible(&node
->proc
->wait
);
1057 if (hlist_empty(&node
->refs
) && !node
->local_strong_refs
&&
1058 !node
->local_weak_refs
&& !node
->tmp_refs
) {
1060 binder_dequeue_work_ilocked(&node
->work
);
1061 rb_erase(&node
->rb_node
, &proc
->nodes
);
1062 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
1063 "refless node %d deleted\n",
1066 BUG_ON(!list_empty(&node
->work
.entry
));
1067 spin_lock(&binder_dead_nodes_lock
);
1069 * tmp_refs could have changed so
1072 if (node
->tmp_refs
) {
1073 spin_unlock(&binder_dead_nodes_lock
);
1076 hlist_del(&node
->dead_node
);
1077 spin_unlock(&binder_dead_nodes_lock
);
1078 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
1079 "dead node %d deleted\n",
1088 static void binder_dec_node(struct binder_node
*node
, int strong
, int internal
)
1093 binder_inner_proc_lock(node
->proc
);
1094 free_node
= binder_dec_node_ilocked(node
, strong
, internal
);
1096 binder_inner_proc_unlock(node
->proc
);
1099 binder_free_node(node
);
1102 static void binder_inc_node_tmpref_ilocked(struct binder_node
*node
)
1105 * No call to binder_inc_node() is needed since we
1106 * don't need to inform userspace of any changes to
1113 * binder_inc_node_tmpref() - take a temporary reference on node
1114 * @node: node to reference
1116 * Take reference on node to prevent the node from being freed
1117 * while referenced only by a local variable. The inner lock is
1118 * needed to serialize with the node work on the queue (which
1119 * isn't needed after the node is dead). If the node is dead
1120 * (node->proc is NULL), use binder_dead_nodes_lock to protect
1121 * node->tmp_refs against dead-node-only cases where the node
1122 * lock cannot be acquired (eg traversing the dead node list to
1125 static void binder_inc_node_tmpref(struct binder_node
*node
)
1128 binder_inner_proc_lock(node
->proc
);
1130 spin_lock(&binder_dead_nodes_lock
);
1131 binder_inc_node_tmpref_ilocked(node
);
1133 binder_inner_proc_unlock(node
->proc
);
1135 spin_unlock(&binder_dead_nodes_lock
);
1139 * binder_dec_node_tmpref() - remove a temporary reference on node
1140 * @node: node to reference
1142 * Release temporary reference on node taken via binder_inc_node_tmpref()
1144 static void binder_dec_node_tmpref(struct binder_node
*node
)
1149 binder_inner_proc_lock(node
->proc
);
1151 spin_lock(&binder_dead_nodes_lock
);
1153 BUG_ON(node
->tmp_refs
< 0);
1155 spin_unlock(&binder_dead_nodes_lock
);
1157 * Call binder_dec_node() to check if all refcounts are 0
1158 * and cleanup is needed. Calling with strong=0 and internal=1
1159 * causes no actual reference to be released in binder_dec_node().
1160 * If that changes, a change is needed here too.
1162 free_node
= binder_dec_node_ilocked(node
, 0, 1);
1164 binder_inner_proc_unlock(node
->proc
);
1166 binder_free_node(node
);
/* Drop the temporary reference taken by binder_get_node()/tmpref. */
static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
1174 static struct binder_ref
*binder_get_ref(struct binder_proc
*proc
,
1175 u32 desc
, bool need_strong_ref
)
1177 struct rb_node
*n
= proc
->refs_by_desc
.rb_node
;
1178 struct binder_ref
*ref
;
1181 ref
= rb_entry(n
, struct binder_ref
, rb_node_desc
);
1183 if (desc
< ref
->data
.desc
) {
1185 } else if (desc
> ref
->data
.desc
) {
1187 } else if (need_strong_ref
&& !ref
->data
.strong
) {
1188 binder_user_error("tried to use weak ref as strong ref\n");
1198 * binder_get_ref_for_node() - get the ref associated with given node
1199 * @proc: binder_proc that owns the ref
1200 * @node: binder_node of target
1201 * @new_ref: newly allocated binder_ref to be initialized or %NULL
1203 * Look up the ref for the given node and return it if it exists
1205 * If it doesn't exist and the caller provides a newly allocated
1206 * ref, initialize the fields of the newly allocated ref and insert
1207 * into the given proc rb_trees and node refs list.
1209 * Return: the ref for node. It is possible that another thread
1210 * allocated/initialized the ref first in which case the
1211 * returned ref would be different than the passed-in
1212 * new_ref. new_ref must be kfree'd by the caller in
1215 static struct binder_ref
*binder_get_ref_for_node(struct binder_proc
*proc
,
1216 struct binder_node
*node
,
1217 struct binder_ref
*new_ref
)
1219 struct binder_context
*context
= proc
->context
;
1220 struct rb_node
**p
= &proc
->refs_by_node
.rb_node
;
1221 struct rb_node
*parent
= NULL
;
1222 struct binder_ref
*ref
;
1227 ref
= rb_entry(parent
, struct binder_ref
, rb_node_node
);
1229 if (node
< ref
->node
)
1231 else if (node
> ref
->node
)
1232 p
= &(*p
)->rb_right
;
1239 binder_stats_created(BINDER_STAT_REF
);
1240 new_ref
->data
.debug_id
= atomic_inc_return(&binder_last_id
);
1241 new_ref
->proc
= proc
;
1242 new_ref
->node
= node
;
1243 rb_link_node(&new_ref
->rb_node_node
, parent
, p
);
1244 rb_insert_color(&new_ref
->rb_node_node
, &proc
->refs_by_node
);
1246 new_ref
->data
.desc
= (node
== context
->binder_context_mgr_node
) ? 0 : 1;
1247 for (n
= rb_first(&proc
->refs_by_desc
); n
!= NULL
; n
= rb_next(n
)) {
1248 ref
= rb_entry(n
, struct binder_ref
, rb_node_desc
);
1249 if (ref
->data
.desc
> new_ref
->data
.desc
)
1251 new_ref
->data
.desc
= ref
->data
.desc
+ 1;
1254 p
= &proc
->refs_by_desc
.rb_node
;
1257 ref
= rb_entry(parent
, struct binder_ref
, rb_node_desc
);
1259 if (new_ref
->data
.desc
< ref
->data
.desc
)
1261 else if (new_ref
->data
.desc
> ref
->data
.desc
)
1262 p
= &(*p
)->rb_right
;
1266 rb_link_node(&new_ref
->rb_node_desc
, parent
, p
);
1267 rb_insert_color(&new_ref
->rb_node_desc
, &proc
->refs_by_desc
);
1268 hlist_add_head(&new_ref
->node_entry
, &node
->refs
);
1270 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
1271 "%d new ref %d desc %d for node %d\n",
1272 proc
->pid
, new_ref
->data
.debug_id
, new_ref
->data
.desc
,
1277 static void binder_cleanup_ref(struct binder_ref
*ref
)
1279 bool delete_node
= false;
1280 struct binder_proc
*node_proc
= ref
->node
->proc
;
1282 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
1283 "%d delete ref %d desc %d for node %d\n",
1284 ref
->proc
->pid
, ref
->data
.debug_id
, ref
->data
.desc
,
1285 ref
->node
->debug_id
);
1287 rb_erase(&ref
->rb_node_desc
, &ref
->proc
->refs_by_desc
);
1288 rb_erase(&ref
->rb_node_node
, &ref
->proc
->refs_by_node
);
1291 binder_inner_proc_lock(node_proc
);
1292 if (ref
->data
.strong
)
1293 binder_dec_node_ilocked(ref
->node
, 1, 1);
1295 hlist_del(&ref
->node_entry
);
1296 delete_node
= binder_dec_node_ilocked(ref
->node
, 0, 1);
1298 binder_inner_proc_unlock(node_proc
);
1300 * Clear ref->node unless we want the caller to free the node
1304 * The caller uses ref->node to determine
1305 * whether the node needs to be freed. Clear
1306 * it since the node is still alive.
1312 binder_debug(BINDER_DEBUG_DEAD_BINDER
,
1313 "%d delete ref %d desc %d has death notification\n",
1314 ref
->proc
->pid
, ref
->data
.debug_id
,
1316 binder_dequeue_work(ref
->proc
, &ref
->death
->work
);
1317 binder_stats_deleted(BINDER_STAT_DEATH
);
1319 binder_stats_deleted(BINDER_STAT_REF
);
1323 * binder_inc_ref() - increment the ref for given handle
1324 * @ref: ref to be incremented
1325 * @strong: if true, strong increment, else weak
1326 * @target_list: list to queue node work on
1328 * Increment the ref.
1330 * Return: 0, if successful, else errno
1332 static int binder_inc_ref(struct binder_ref
*ref
, int strong
,
1333 struct list_head
*target_list
)
1338 if (ref
->data
.strong
== 0) {
1339 ret
= binder_inc_node(ref
->node
, 1, 1, target_list
);
1345 if (ref
->data
.weak
== 0) {
1346 ret
= binder_inc_node(ref
->node
, 0, 1, target_list
);
1356 * binder_dec_ref() - dec the ref for given handle
1357 * @ref: ref to be decremented
1358 * @strong: if true, strong decrement, else weak
1360 * Decrement the ref.
1362 * TODO: kfree is avoided here since an upcoming patch
1363 * will put this under a lock.
1365 * Return: true if ref is cleaned up and ready to be freed
1367 static bool binder_dec_ref(struct binder_ref
*ref
, int strong
)
1370 if (ref
->data
.strong
== 0) {
1371 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1372 ref
->proc
->pid
, ref
->data
.debug_id
,
1373 ref
->data
.desc
, ref
->data
.strong
,
1378 if (ref
->data
.strong
== 0)
1379 binder_dec_node(ref
->node
, strong
, 1);
1381 if (ref
->data
.weak
== 0) {
1382 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1383 ref
->proc
->pid
, ref
->data
.debug_id
,
1384 ref
->data
.desc
, ref
->data
.strong
,
1390 if (ref
->data
.strong
== 0 && ref
->data
.weak
== 0) {
1391 binder_cleanup_ref(ref
);
1393 * TODO: we could kfree(ref) here, but an upcoming
1394 * patch will call this with a lock held, so we
1395 * return an indication that the ref should be
1404 * binder_get_node_from_ref() - get the node from the given proc/desc
1405 * @proc: proc containing the ref
1406 * @desc: the handle associated with the ref
1407 * @need_strong_ref: if true, only return node if ref is strong
1408 * @rdata: the id/refcount data for the ref
1410 * Given a proc and ref handle, return the associated binder_node
1412 * Return: a binder_node or NULL if not found or not strong when strong required
1414 static struct binder_node
*binder_get_node_from_ref(
1415 struct binder_proc
*proc
,
1416 u32 desc
, bool need_strong_ref
,
1417 struct binder_ref_data
*rdata
)
1419 struct binder_node
*node
;
1420 struct binder_ref
*ref
;
1422 ref
= binder_get_ref(proc
, desc
, need_strong_ref
);
1427 * Take an implicit reference on the node to ensure
1428 * it stays alive until the call to binder_put_node()
1430 binder_inc_node_tmpref(node
);
1441 * binder_free_ref() - free the binder_ref
1444 * Free the binder_ref. Free the binder_node indicated by ref->node
1445 * (if non-NULL) and the binder_ref_death indicated by ref->death.
1447 static void binder_free_ref(struct binder_ref
*ref
)
1450 binder_free_node(ref
->node
);
1456 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1457 * @proc: proc containing the ref
1458 * @desc: the handle associated with the ref
1459 * @increment: true=inc reference, false=dec reference
1460 * @strong: true=strong reference, false=weak reference
1461 * @rdata: the id/refcount data for the ref
1463 * Given a proc and ref handle, increment or decrement the ref
1464 * according to "increment" arg.
1466 * Return: 0 if successful, else errno
1468 static int binder_update_ref_for_handle(struct binder_proc
*proc
,
1469 uint32_t desc
, bool increment
, bool strong
,
1470 struct binder_ref_data
*rdata
)
1473 struct binder_ref
*ref
;
1474 bool delete_ref
= false;
1476 ref
= binder_get_ref(proc
, desc
, strong
);
1482 ret
= binder_inc_ref(ref
, strong
, NULL
);
1484 delete_ref
= binder_dec_ref(ref
, strong
);
1490 binder_free_ref(ref
);
1498 * binder_dec_ref_for_handle() - dec the ref for given handle
1499 * @proc: proc containing the ref
1500 * @desc: the handle associated with the ref
1501 * @strong: true=strong reference, false=weak reference
1502 * @rdata: the id/refcount data for the ref
1504 * Just calls binder_update_ref_for_handle() to decrement the ref.
1506 * Return: 0 if successful, else errno
1508 static int binder_dec_ref_for_handle(struct binder_proc
*proc
,
1509 uint32_t desc
, bool strong
, struct binder_ref_data
*rdata
)
1511 return binder_update_ref_for_handle(proc
, desc
, false, strong
, rdata
);
1516 * binder_inc_ref_for_node() - increment the ref for given proc/node
1517 * @proc: proc containing the ref
1518 * @node: target node
1519 * @strong: true=strong reference, false=weak reference
1520 * @target_list: worklist to use if node is incremented
1521 * @rdata: the id/refcount data for the ref
1523 * Given a proc and node, increment the ref. Create the ref if it
1524 * doesn't already exist
1526 * Return: 0 if successful, else errno
1528 static int binder_inc_ref_for_node(struct binder_proc
*proc
,
1529 struct binder_node
*node
,
1531 struct list_head
*target_list
,
1532 struct binder_ref_data
*rdata
)
1534 struct binder_ref
*ref
;
1535 struct binder_ref
*new_ref
= NULL
;
1538 ref
= binder_get_ref_for_node(proc
, node
, NULL
);
1540 new_ref
= kzalloc(sizeof(*ref
), GFP_KERNEL
);
1543 ref
= binder_get_ref_for_node(proc
, node
, new_ref
);
1545 ret
= binder_inc_ref(ref
, strong
, target_list
);
1547 if (new_ref
&& ref
!= new_ref
)
1549 * Another thread created the ref first so
1550 * free the one we allocated
1556 static void binder_pop_transaction(struct binder_thread
*target_thread
,
1557 struct binder_transaction
*t
)
1559 BUG_ON(!target_thread
);
1560 BUG_ON(target_thread
->transaction_stack
!= t
);
1561 BUG_ON(target_thread
->transaction_stack
->from
!= target_thread
);
1562 target_thread
->transaction_stack
=
1563 target_thread
->transaction_stack
->from_parent
;
1568 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1569 * @thread: thread to decrement
1571 * A thread needs to be kept alive while being used to create or
1572 * handle a transaction. binder_get_txn_from() is used to safely
1573 * extract t->from from a binder_transaction and keep the thread
1574 * indicated by t->from from being freed. When done with that
1575 * binder_thread, this function is called to decrement the
1576 * tmp_ref and free if appropriate (thread has been released
1577 * and no transaction being processed by the driver)
1579 static void binder_thread_dec_tmpref(struct binder_thread
*thread
)
1582 * atomic is used to protect the counter value while
1583 * it cannot reach zero or thread->is_dead is false
1585 * TODO: future patch adds locking to ensure that the
1586 * check of tmp_ref and is_dead is done with a lock held
1588 atomic_dec(&thread
->tmp_ref
);
1589 if (thread
->is_dead
&& !atomic_read(&thread
->tmp_ref
)) {
1590 binder_free_thread(thread
);
1596 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1597 * @proc: proc to decrement
1599 * A binder_proc needs to be kept alive while being used to create or
1600 * handle a transaction. proc->tmp_ref is incremented when
1601 * creating a new transaction or the binder_proc is currently in-use
1602 * by threads that are being released. When done with the binder_proc,
1603 * this function is called to decrement the counter and free the
1604 * proc if appropriate (proc has been released, all threads have
1605 * been released and not currenly in-use to process a transaction).
1607 static void binder_proc_dec_tmpref(struct binder_proc
*proc
)
1610 if (proc
->is_dead
&& RB_EMPTY_ROOT(&proc
->threads
) &&
1612 binder_free_proc(proc
);
1618 * binder_get_txn_from() - safely extract the "from" thread in transaction
1619 * @t: binder transaction for t->from
1621 * Atomically return the "from" thread and increment the tmp_ref
1622 * count for the thread to ensure it stays alive until
1623 * binder_thread_dec_tmpref() is called.
1625 * Return: the value of t->from
1627 static struct binder_thread
*binder_get_txn_from(
1628 struct binder_transaction
*t
)
1630 struct binder_thread
*from
;
1632 spin_lock(&t
->lock
);
1635 atomic_inc(&from
->tmp_ref
);
1636 spin_unlock(&t
->lock
);
1640 static void binder_free_transaction(struct binder_transaction
*t
)
1643 t
->buffer
->transaction
= NULL
;
1645 binder_stats_deleted(BINDER_STAT_TRANSACTION
);
1648 static void binder_send_failed_reply(struct binder_transaction
*t
,
1649 uint32_t error_code
)
1651 struct binder_thread
*target_thread
;
1652 struct binder_transaction
*next
;
1654 BUG_ON(t
->flags
& TF_ONE_WAY
);
1656 target_thread
= binder_get_txn_from(t
);
1657 if (target_thread
) {
1658 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION
,
1659 "send failed reply for transaction %d to %d:%d\n",
1661 target_thread
->proc
->pid
,
1662 target_thread
->pid
);
1664 binder_pop_transaction(target_thread
, t
);
1665 if (target_thread
->reply_error
.cmd
== BR_OK
) {
1666 target_thread
->reply_error
.cmd
= error_code
;
1667 binder_enqueue_work(
1668 target_thread
->proc
,
1669 &target_thread
->reply_error
.work
,
1670 &target_thread
->todo
);
1671 wake_up_interruptible(&target_thread
->wait
);
1673 WARN(1, "Unexpected reply error: %u\n",
1674 target_thread
->reply_error
.cmd
);
1676 binder_thread_dec_tmpref(target_thread
);
1677 binder_free_transaction(t
);
1680 next
= t
->from_parent
;
1682 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION
,
1683 "send failed reply for transaction %d, target dead\n",
1686 binder_free_transaction(t
);
1688 binder_debug(BINDER_DEBUG_DEAD_BINDER
,
1689 "reply failed, no target thread at root\n");
1693 binder_debug(BINDER_DEBUG_DEAD_BINDER
,
1694 "reply failed, no target thread -- retry %d\n",
1700 * binder_validate_object() - checks for a valid metadata object in a buffer.
1701 * @buffer: binder_buffer that we're parsing.
1702 * @offset: offset in the buffer at which to validate an object.
1704 * Return: If there's a valid metadata object at @offset in @buffer, the
1705 * size of that object. Otherwise, it returns zero.
1707 static size_t binder_validate_object(struct binder_buffer
*buffer
, u64 offset
)
1709 /* Check if we can read a header first */
1710 struct binder_object_header
*hdr
;
1711 size_t object_size
= 0;
1713 if (offset
> buffer
->data_size
- sizeof(*hdr
) ||
1714 buffer
->data_size
< sizeof(*hdr
) ||
1715 !IS_ALIGNED(offset
, sizeof(u32
)))
1718 /* Ok, now see if we can read a complete object. */
1719 hdr
= (struct binder_object_header
*)(buffer
->data
+ offset
);
1720 switch (hdr
->type
) {
1721 case BINDER_TYPE_BINDER
:
1722 case BINDER_TYPE_WEAK_BINDER
:
1723 case BINDER_TYPE_HANDLE
:
1724 case BINDER_TYPE_WEAK_HANDLE
:
1725 object_size
= sizeof(struct flat_binder_object
);
1727 case BINDER_TYPE_FD
:
1728 object_size
= sizeof(struct binder_fd_object
);
1730 case BINDER_TYPE_PTR
:
1731 object_size
= sizeof(struct binder_buffer_object
);
1733 case BINDER_TYPE_FDA
:
1734 object_size
= sizeof(struct binder_fd_array_object
);
1739 if (offset
<= buffer
->data_size
- object_size
&&
1740 buffer
->data_size
>= object_size
)
1747 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1748 * @b: binder_buffer containing the object
1749 * @index: index in offset array at which the binder_buffer_object is
1751 * @start: points to the start of the offset array
1752 * @num_valid: the number of valid offsets in the offset array
1754 * Return: If @index is within the valid range of the offset array
1755 * described by @start and @num_valid, and if there's a valid
1756 * binder_buffer_object at the offset found in index @index
1757 * of the offset array, that object is returned. Otherwise,
1758 * %NULL is returned.
1759 * Note that the offset found in index @index itself is not
1760 * verified; this function assumes that @num_valid elements
1761 * from @start were previously verified to have valid offsets.
1763 static struct binder_buffer_object
*binder_validate_ptr(struct binder_buffer
*b
,
1764 binder_size_t index
,
1765 binder_size_t
*start
,
1766 binder_size_t num_valid
)
1768 struct binder_buffer_object
*buffer_obj
;
1769 binder_size_t
*offp
;
1771 if (index
>= num_valid
)
1774 offp
= start
+ index
;
1775 buffer_obj
= (struct binder_buffer_object
*)(b
->data
+ *offp
);
1776 if (buffer_obj
->hdr
.type
!= BINDER_TYPE_PTR
)
1783 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1784 * @b: transaction buffer
1785 * @objects_start start of objects buffer
1786 * @buffer: binder_buffer_object in which to fix up
1787 * @offset: start offset in @buffer to fix up
1788 * @last_obj: last binder_buffer_object that we fixed up in
1789 * @last_min_offset: minimum fixup offset in @last_obj
1791 * Return: %true if a fixup in buffer @buffer at offset @offset is
1794 * For safety reasons, we only allow fixups inside a buffer to happen
1795 * at increasing offsets; additionally, we only allow fixup on the last
1796 * buffer object that was verified, or one of its parents.
1798 * Example of what is allowed:
1801 * B (parent = A, offset = 0)
1802 * C (parent = A, offset = 16)
1803 * D (parent = C, offset = 0)
1804 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1806 * Examples of what is not allowed:
1808 * Decreasing offsets within the same parent:
1810 * C (parent = A, offset = 16)
1811 * B (parent = A, offset = 0) // decreasing offset within A
1813 * Referring to a parent that wasn't the last object or any of its parents:
1815 * B (parent = A, offset = 0)
1816 * C (parent = A, offset = 0)
1817 * C (parent = A, offset = 16)
1818 * D (parent = B, offset = 0) // B is not A or any of A's parents
1820 static bool binder_validate_fixup(struct binder_buffer
*b
,
1821 binder_size_t
*objects_start
,
1822 struct binder_buffer_object
*buffer
,
1823 binder_size_t fixup_offset
,
1824 struct binder_buffer_object
*last_obj
,
1825 binder_size_t last_min_offset
)
1828 /* Nothing to fix up in */
1832 while (last_obj
!= buffer
) {
1834 * Safe to retrieve the parent of last_obj, since it
1835 * was already previously verified by the driver.
1837 if ((last_obj
->flags
& BINDER_BUFFER_FLAG_HAS_PARENT
) == 0)
1839 last_min_offset
= last_obj
->parent_offset
+ sizeof(uintptr_t);
1840 last_obj
= (struct binder_buffer_object
*)
1841 (b
->data
+ *(objects_start
+ last_obj
->parent
));
1843 return (fixup_offset
>= last_min_offset
);
1846 static void binder_transaction_buffer_release(struct binder_proc
*proc
,
1847 struct binder_buffer
*buffer
,
1848 binder_size_t
*failed_at
)
1850 binder_size_t
*offp
, *off_start
, *off_end
;
1851 int debug_id
= buffer
->debug_id
;
1853 binder_debug(BINDER_DEBUG_TRANSACTION
,
1854 "%d buffer release %d, size %zd-%zd, failed at %p\n",
1855 proc
->pid
, buffer
->debug_id
,
1856 buffer
->data_size
, buffer
->offsets_size
, failed_at
);
1858 if (buffer
->target_node
)
1859 binder_dec_node(buffer
->target_node
, 1, 0);
1861 off_start
= (binder_size_t
*)(buffer
->data
+
1862 ALIGN(buffer
->data_size
, sizeof(void *)));
1864 off_end
= failed_at
;
1866 off_end
= (void *)off_start
+ buffer
->offsets_size
;
1867 for (offp
= off_start
; offp
< off_end
; offp
++) {
1868 struct binder_object_header
*hdr
;
1869 size_t object_size
= binder_validate_object(buffer
, *offp
);
1871 if (object_size
== 0) {
1872 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
1873 debug_id
, (u64
)*offp
, buffer
->data_size
);
1876 hdr
= (struct binder_object_header
*)(buffer
->data
+ *offp
);
1877 switch (hdr
->type
) {
1878 case BINDER_TYPE_BINDER
:
1879 case BINDER_TYPE_WEAK_BINDER
: {
1880 struct flat_binder_object
*fp
;
1881 struct binder_node
*node
;
1883 fp
= to_flat_binder_object(hdr
);
1884 node
= binder_get_node(proc
, fp
->binder
);
1886 pr_err("transaction release %d bad node %016llx\n",
1887 debug_id
, (u64
)fp
->binder
);
1890 binder_debug(BINDER_DEBUG_TRANSACTION
,
1891 " node %d u%016llx\n",
1892 node
->debug_id
, (u64
)node
->ptr
);
1893 binder_dec_node(node
, hdr
->type
== BINDER_TYPE_BINDER
,
1895 binder_put_node(node
);
1897 case BINDER_TYPE_HANDLE
:
1898 case BINDER_TYPE_WEAK_HANDLE
: {
1899 struct flat_binder_object
*fp
;
1900 struct binder_ref_data rdata
;
1903 fp
= to_flat_binder_object(hdr
);
1904 ret
= binder_dec_ref_for_handle(proc
, fp
->handle
,
1905 hdr
->type
== BINDER_TYPE_HANDLE
, &rdata
);
1908 pr_err("transaction release %d bad handle %d, ret = %d\n",
1909 debug_id
, fp
->handle
, ret
);
1912 binder_debug(BINDER_DEBUG_TRANSACTION
,
1913 " ref %d desc %d\n",
1914 rdata
.debug_id
, rdata
.desc
);
1917 case BINDER_TYPE_FD
: {
1918 struct binder_fd_object
*fp
= to_binder_fd_object(hdr
);
1920 binder_debug(BINDER_DEBUG_TRANSACTION
,
1921 " fd %d\n", fp
->fd
);
1923 task_close_fd(proc
, fp
->fd
);
1925 case BINDER_TYPE_PTR
:
1927 * Nothing to do here, this will get cleaned up when the
1928 * transaction buffer gets freed
1931 case BINDER_TYPE_FDA
: {
1932 struct binder_fd_array_object
*fda
;
1933 struct binder_buffer_object
*parent
;
1934 uintptr_t parent_buffer
;
1937 binder_size_t fd_buf_size
;
1939 fda
= to_binder_fd_array_object(hdr
);
1940 parent
= binder_validate_ptr(buffer
, fda
->parent
,
1944 pr_err("transaction release %d bad parent offset",
1949 * Since the parent was already fixed up, convert it
1950 * back to kernel address space to access it
1952 parent_buffer
= parent
->buffer
-
1953 binder_alloc_get_user_buffer_offset(
1956 fd_buf_size
= sizeof(u32
) * fda
->num_fds
;
1957 if (fda
->num_fds
>= SIZE_MAX
/ sizeof(u32
)) {
1958 pr_err("transaction release %d invalid number of fds (%lld)\n",
1959 debug_id
, (u64
)fda
->num_fds
);
1962 if (fd_buf_size
> parent
->length
||
1963 fda
->parent_offset
> parent
->length
- fd_buf_size
) {
1964 /* No space for all file descriptors here. */
1965 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
1966 debug_id
, (u64
)fda
->num_fds
);
1969 fd_array
= (u32
*)(parent_buffer
+ fda
->parent_offset
);
1970 for (fd_index
= 0; fd_index
< fda
->num_fds
; fd_index
++)
1971 task_close_fd(proc
, fd_array
[fd_index
]);
1974 pr_err("transaction release %d bad object type %x\n",
1975 debug_id
, hdr
->type
);
1981 static int binder_translate_binder(struct flat_binder_object
*fp
,
1982 struct binder_transaction
*t
,
1983 struct binder_thread
*thread
)
1985 struct binder_node
*node
;
1986 struct binder_proc
*proc
= thread
->proc
;
1987 struct binder_proc
*target_proc
= t
->to_proc
;
1988 struct binder_ref_data rdata
;
1991 node
= binder_get_node(proc
, fp
->binder
);
1993 node
= binder_new_node(proc
, fp
->binder
, fp
->cookie
);
1997 node
->min_priority
= fp
->flags
& FLAT_BINDER_FLAG_PRIORITY_MASK
;
1998 node
->accept_fds
= !!(fp
->flags
& FLAT_BINDER_FLAG_ACCEPTS_FDS
);
2000 if (fp
->cookie
!= node
->cookie
) {
2001 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2002 proc
->pid
, thread
->pid
, (u64
)fp
->binder
,
2003 node
->debug_id
, (u64
)fp
->cookie
,
2008 if (security_binder_transfer_binder(proc
->tsk
, target_proc
->tsk
)) {
2013 ret
= binder_inc_ref_for_node(target_proc
, node
,
2014 fp
->hdr
.type
== BINDER_TYPE_BINDER
,
2015 &thread
->todo
, &rdata
);
2019 if (fp
->hdr
.type
== BINDER_TYPE_BINDER
)
2020 fp
->hdr
.type
= BINDER_TYPE_HANDLE
;
2022 fp
->hdr
.type
= BINDER_TYPE_WEAK_HANDLE
;
2024 fp
->handle
= rdata
.desc
;
2027 trace_binder_transaction_node_to_ref(t
, node
, &rdata
);
2028 binder_debug(BINDER_DEBUG_TRANSACTION
,
2029 " node %d u%016llx -> ref %d desc %d\n",
2030 node
->debug_id
, (u64
)node
->ptr
,
2031 rdata
.debug_id
, rdata
.desc
);
2033 binder_put_node(node
);
2037 static int binder_translate_handle(struct flat_binder_object
*fp
,
2038 struct binder_transaction
*t
,
2039 struct binder_thread
*thread
)
2041 struct binder_proc
*proc
= thread
->proc
;
2042 struct binder_proc
*target_proc
= t
->to_proc
;
2043 struct binder_node
*node
;
2044 struct binder_ref_data src_rdata
;
2047 node
= binder_get_node_from_ref(proc
, fp
->handle
,
2048 fp
->hdr
.type
== BINDER_TYPE_HANDLE
, &src_rdata
);
2050 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2051 proc
->pid
, thread
->pid
, fp
->handle
);
2054 if (security_binder_transfer_binder(proc
->tsk
, target_proc
->tsk
)) {
2059 if (node
->proc
== target_proc
) {
2060 if (fp
->hdr
.type
== BINDER_TYPE_HANDLE
)
2061 fp
->hdr
.type
= BINDER_TYPE_BINDER
;
2063 fp
->hdr
.type
= BINDER_TYPE_WEAK_BINDER
;
2064 fp
->binder
= node
->ptr
;
2065 fp
->cookie
= node
->cookie
;
2066 binder_inc_node(node
,
2067 fp
->hdr
.type
== BINDER_TYPE_BINDER
,
2069 trace_binder_transaction_ref_to_node(t
, node
, &src_rdata
);
2070 binder_debug(BINDER_DEBUG_TRANSACTION
,
2071 " ref %d desc %d -> node %d u%016llx\n",
2072 src_rdata
.debug_id
, src_rdata
.desc
, node
->debug_id
,
2076 struct binder_ref_data dest_rdata
;
2078 ret
= binder_inc_ref_for_node(target_proc
, node
,
2079 fp
->hdr
.type
== BINDER_TYPE_HANDLE
,
2085 fp
->handle
= dest_rdata
.desc
;
2087 trace_binder_transaction_ref_to_ref(t
, node
, &src_rdata
,
2089 binder_debug(BINDER_DEBUG_TRANSACTION
,
2090 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2091 src_rdata
.debug_id
, src_rdata
.desc
,
2092 dest_rdata
.debug_id
, dest_rdata
.desc
,
2096 binder_put_node(node
);
2100 static int binder_translate_fd(int fd
,
2101 struct binder_transaction
*t
,
2102 struct binder_thread
*thread
,
2103 struct binder_transaction
*in_reply_to
)
2105 struct binder_proc
*proc
= thread
->proc
;
2106 struct binder_proc
*target_proc
= t
->to_proc
;
2110 bool target_allows_fd
;
2113 target_allows_fd
= !!(in_reply_to
->flags
& TF_ACCEPT_FDS
);
2115 target_allows_fd
= t
->buffer
->target_node
->accept_fds
;
2116 if (!target_allows_fd
) {
2117 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2118 proc
->pid
, thread
->pid
,
2119 in_reply_to
? "reply" : "transaction",
2122 goto err_fd_not_accepted
;
2127 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2128 proc
->pid
, thread
->pid
, fd
);
2132 ret
= security_binder_transfer_file(proc
->tsk
, target_proc
->tsk
, file
);
2138 target_fd
= task_get_unused_fd_flags(target_proc
, O_CLOEXEC
);
2139 if (target_fd
< 0) {
2141 goto err_get_unused_fd
;
2143 task_fd_install(target_proc
, target_fd
, file
);
2144 trace_binder_transaction_fd(t
, fd
, target_fd
);
2145 binder_debug(BINDER_DEBUG_TRANSACTION
, " fd %d -> %d\n",
2154 err_fd_not_accepted
:
2158 static int binder_translate_fd_array(struct binder_fd_array_object
*fda
,
2159 struct binder_buffer_object
*parent
,
2160 struct binder_transaction
*t
,
2161 struct binder_thread
*thread
,
2162 struct binder_transaction
*in_reply_to
)
2164 binder_size_t fdi
, fd_buf_size
, num_installed_fds
;
2166 uintptr_t parent_buffer
;
2168 struct binder_proc
*proc
= thread
->proc
;
2169 struct binder_proc
*target_proc
= t
->to_proc
;
2171 fd_buf_size
= sizeof(u32
) * fda
->num_fds
;
2172 if (fda
->num_fds
>= SIZE_MAX
/ sizeof(u32
)) {
2173 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2174 proc
->pid
, thread
->pid
, (u64
)fda
->num_fds
);
2177 if (fd_buf_size
> parent
->length
||
2178 fda
->parent_offset
> parent
->length
- fd_buf_size
) {
2179 /* No space for all file descriptors here. */
2180 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2181 proc
->pid
, thread
->pid
, (u64
)fda
->num_fds
);
2185 * Since the parent was already fixed up, convert it
2186 * back to the kernel address space to access it
2188 parent_buffer
= parent
->buffer
-
2189 binder_alloc_get_user_buffer_offset(&target_proc
->alloc
);
2190 fd_array
= (u32
*)(parent_buffer
+ fda
->parent_offset
);
2191 if (!IS_ALIGNED((unsigned long)fd_array
, sizeof(u32
))) {
2192 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2193 proc
->pid
, thread
->pid
);
2196 for (fdi
= 0; fdi
< fda
->num_fds
; fdi
++) {
2197 target_fd
= binder_translate_fd(fd_array
[fdi
], t
, thread
,
2200 goto err_translate_fd_failed
;
2201 fd_array
[fdi
] = target_fd
;
2205 err_translate_fd_failed
:
2207 * Failed to allocate fd or security error, free fds
2210 num_installed_fds
= fdi
;
2211 for (fdi
= 0; fdi
< num_installed_fds
; fdi
++)
2212 task_close_fd(target_proc
, fd_array
[fdi
]);
2216 static int binder_fixup_parent(struct binder_transaction
*t
,
2217 struct binder_thread
*thread
,
2218 struct binder_buffer_object
*bp
,
2219 binder_size_t
*off_start
,
2220 binder_size_t num_valid
,
2221 struct binder_buffer_object
*last_fixup_obj
,
2222 binder_size_t last_fixup_min_off
)
2224 struct binder_buffer_object
*parent
;
2226 struct binder_buffer
*b
= t
->buffer
;
2227 struct binder_proc
*proc
= thread
->proc
;
2228 struct binder_proc
*target_proc
= t
->to_proc
;
2230 if (!(bp
->flags
& BINDER_BUFFER_FLAG_HAS_PARENT
))
2233 parent
= binder_validate_ptr(b
, bp
->parent
, off_start
, num_valid
);
2235 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2236 proc
->pid
, thread
->pid
);
2240 if (!binder_validate_fixup(b
, off_start
,
2241 parent
, bp
->parent_offset
,
2243 last_fixup_min_off
)) {
2244 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2245 proc
->pid
, thread
->pid
);
2249 if (parent
->length
< sizeof(binder_uintptr_t
) ||
2250 bp
->parent_offset
> parent
->length
- sizeof(binder_uintptr_t
)) {
2251 /* No space for a pointer here! */
2252 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2253 proc
->pid
, thread
->pid
);
2256 parent_buffer
= (u8
*)(parent
->buffer
-
2257 binder_alloc_get_user_buffer_offset(
2258 &target_proc
->alloc
));
2259 *(binder_uintptr_t
*)(parent_buffer
+ bp
->parent_offset
) = bp
->buffer
;
2264 static void binder_transaction(struct binder_proc
*proc
,
2265 struct binder_thread
*thread
,
2266 struct binder_transaction_data
*tr
, int reply
,
2267 binder_size_t extra_buffers_size
)
2270 struct binder_transaction
*t
;
2271 struct binder_work
*tcomplete
;
2272 binder_size_t
*offp
, *off_end
, *off_start
;
2273 binder_size_t off_min
;
2274 u8
*sg_bufp
, *sg_buf_end
;
2275 struct binder_proc
*target_proc
= NULL
;
2276 struct binder_thread
*target_thread
= NULL
;
2277 struct binder_node
*target_node
= NULL
;
2278 struct list_head
*target_list
;
2279 wait_queue_head_t
*target_wait
;
2280 struct binder_transaction
*in_reply_to
= NULL
;
2281 struct binder_transaction_log_entry
*e
;
2282 uint32_t return_error
= 0;
2283 uint32_t return_error_param
= 0;
2284 uint32_t return_error_line
= 0;
2285 struct binder_buffer_object
*last_fixup_obj
= NULL
;
2286 binder_size_t last_fixup_min_off
= 0;
2287 struct binder_context
*context
= proc
->context
;
2288 int t_debug_id
= atomic_inc_return(&binder_last_id
);
2290 e
= binder_transaction_log_add(&binder_transaction_log
);
2291 e
->debug_id
= t_debug_id
;
2292 e
->call_type
= reply
? 2 : !!(tr
->flags
& TF_ONE_WAY
);
2293 e
->from_proc
= proc
->pid
;
2294 e
->from_thread
= thread
->pid
;
2295 e
->target_handle
= tr
->target
.handle
;
2296 e
->data_size
= tr
->data_size
;
2297 e
->offsets_size
= tr
->offsets_size
;
2298 e
->context_name
= proc
->context
->name
;
2301 in_reply_to
= thread
->transaction_stack
;
2302 if (in_reply_to
== NULL
) {
2303 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2304 proc
->pid
, thread
->pid
);
2305 return_error
= BR_FAILED_REPLY
;
2306 return_error_param
= -EPROTO
;
2307 return_error_line
= __LINE__
;
2308 goto err_empty_call_stack
;
2310 binder_set_nice(in_reply_to
->saved_priority
);
2311 if (in_reply_to
->to_thread
!= thread
) {
2312 spin_lock(&in_reply_to
->lock
);
2313 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2314 proc
->pid
, thread
->pid
, in_reply_to
->debug_id
,
2315 in_reply_to
->to_proc
?
2316 in_reply_to
->to_proc
->pid
: 0,
2317 in_reply_to
->to_thread
?
2318 in_reply_to
->to_thread
->pid
: 0);
2319 spin_unlock(&in_reply_to
->lock
);
2320 return_error
= BR_FAILED_REPLY
;
2321 return_error_param
= -EPROTO
;
2322 return_error_line
= __LINE__
;
2324 goto err_bad_call_stack
;
2326 thread
->transaction_stack
= in_reply_to
->to_parent
;
2327 target_thread
= binder_get_txn_from(in_reply_to
);
2328 if (target_thread
== NULL
) {
2329 return_error
= BR_DEAD_REPLY
;
2330 return_error_line
= __LINE__
;
2331 goto err_dead_binder
;
2333 if (target_thread
->transaction_stack
!= in_reply_to
) {
2334 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2335 proc
->pid
, thread
->pid
,
2336 target_thread
->transaction_stack
?
2337 target_thread
->transaction_stack
->debug_id
: 0,
2338 in_reply_to
->debug_id
);
2339 return_error
= BR_FAILED_REPLY
;
2340 return_error_param
= -EPROTO
;
2341 return_error_line
= __LINE__
;
2343 target_thread
= NULL
;
2344 goto err_dead_binder
;
2346 target_proc
= target_thread
->proc
;
2347 target_proc
->tmp_ref
++;
2349 if (tr
->target
.handle
) {
2350 struct binder_ref
*ref
;
2353 * There must already be a strong ref
2354 * on this node. If so, do a strong
2355 * increment on the node to ensure it
2356 * stays alive until the transaction is
2359 ref
= binder_get_ref(proc
, tr
->target
.handle
, true);
2361 binder_inc_node(ref
->node
, 1, 0, NULL
);
2362 target_node
= ref
->node
;
2364 if (target_node
== NULL
) {
2365 binder_user_error("%d:%d got transaction to invalid handle\n",
2366 proc
->pid
, thread
->pid
);
2367 return_error
= BR_FAILED_REPLY
;
2368 return_error_param
= -EINVAL
;
2369 return_error_line
= __LINE__
;
2370 goto err_invalid_target_handle
;
2373 mutex_lock(&context
->context_mgr_node_lock
);
2374 target_node
= context
->binder_context_mgr_node
;
2375 if (target_node
== NULL
) {
2376 return_error
= BR_DEAD_REPLY
;
2377 mutex_unlock(&context
->context_mgr_node_lock
);
2378 return_error_line
= __LINE__
;
2379 goto err_no_context_mgr_node
;
2381 binder_inc_node(target_node
, 1, 0, NULL
);
2382 mutex_unlock(&context
->context_mgr_node_lock
);
2384 e
->to_node
= target_node
->debug_id
;
2385 target_proc
= target_node
->proc
;
2386 if (target_proc
== NULL
) {
2387 return_error
= BR_DEAD_REPLY
;
2388 return_error_line
= __LINE__
;
2389 goto err_dead_binder
;
2391 target_proc
->tmp_ref
++;
2392 if (security_binder_transaction(proc
->tsk
,
2393 target_proc
->tsk
) < 0) {
2394 return_error
= BR_FAILED_REPLY
;
2395 return_error_param
= -EPERM
;
2396 return_error_line
= __LINE__
;
2397 goto err_invalid_target_handle
;
2399 if (!(tr
->flags
& TF_ONE_WAY
) && thread
->transaction_stack
) {
2400 struct binder_transaction
*tmp
;
2402 tmp
= thread
->transaction_stack
;
2403 if (tmp
->to_thread
!= thread
) {
2404 spin_lock(&tmp
->lock
);
2405 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2406 proc
->pid
, thread
->pid
, tmp
->debug_id
,
2407 tmp
->to_proc
? tmp
->to_proc
->pid
: 0,
2409 tmp
->to_thread
->pid
: 0);
2410 spin_unlock(&tmp
->lock
);
2411 return_error
= BR_FAILED_REPLY
;
2412 return_error_param
= -EPROTO
;
2413 return_error_line
= __LINE__
;
2414 goto err_bad_call_stack
;
2417 struct binder_thread
*from
;
2419 spin_lock(&tmp
->lock
);
2421 if (from
&& from
->proc
== target_proc
) {
2422 atomic_inc(&from
->tmp_ref
);
2423 target_thread
= from
;
2424 spin_unlock(&tmp
->lock
);
2427 spin_unlock(&tmp
->lock
);
2428 tmp
= tmp
->from_parent
;
2432 if (target_thread
) {
2433 e
->to_thread
= target_thread
->pid
;
2434 target_list
= &target_thread
->todo
;
2435 target_wait
= &target_thread
->wait
;
2437 target_list
= &target_proc
->todo
;
2438 target_wait
= &target_proc
->wait
;
2440 e
->to_proc
= target_proc
->pid
;
2442 /* TODO: reuse incoming transaction for reply */
2443 t
= kzalloc(sizeof(*t
), GFP_KERNEL
);
2445 return_error
= BR_FAILED_REPLY
;
2446 return_error_param
= -ENOMEM
;
2447 return_error_line
= __LINE__
;
2448 goto err_alloc_t_failed
;
2450 binder_stats_created(BINDER_STAT_TRANSACTION
);
2451 spin_lock_init(&t
->lock
);
2453 tcomplete
= kzalloc(sizeof(*tcomplete
), GFP_KERNEL
);
2454 if (tcomplete
== NULL
) {
2455 return_error
= BR_FAILED_REPLY
;
2456 return_error_param
= -ENOMEM
;
2457 return_error_line
= __LINE__
;
2458 goto err_alloc_tcomplete_failed
;
2460 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE
);
2462 t
->debug_id
= t_debug_id
;
2465 binder_debug(BINDER_DEBUG_TRANSACTION
,
2466 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2467 proc
->pid
, thread
->pid
, t
->debug_id
,
2468 target_proc
->pid
, target_thread
->pid
,
2469 (u64
)tr
->data
.ptr
.buffer
,
2470 (u64
)tr
->data
.ptr
.offsets
,
2471 (u64
)tr
->data_size
, (u64
)tr
->offsets_size
,
2472 (u64
)extra_buffers_size
);
2474 binder_debug(BINDER_DEBUG_TRANSACTION
,
2475 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2476 proc
->pid
, thread
->pid
, t
->debug_id
,
2477 target_proc
->pid
, target_node
->debug_id
,
2478 (u64
)tr
->data
.ptr
.buffer
,
2479 (u64
)tr
->data
.ptr
.offsets
,
2480 (u64
)tr
->data_size
, (u64
)tr
->offsets_size
,
2481 (u64
)extra_buffers_size
);
2483 if (!reply
&& !(tr
->flags
& TF_ONE_WAY
))
2487 t
->sender_euid
= task_euid(proc
->tsk
);
2488 t
->to_proc
= target_proc
;
2489 t
->to_thread
= target_thread
;
2491 t
->flags
= tr
->flags
;
2492 t
->priority
= task_nice(current
);
2494 trace_binder_transaction(reply
, t
, target_node
);
2496 t
->buffer
= binder_alloc_new_buf(&target_proc
->alloc
, tr
->data_size
,
2497 tr
->offsets_size
, extra_buffers_size
,
2498 !reply
&& (t
->flags
& TF_ONE_WAY
));
2499 if (IS_ERR(t
->buffer
)) {
2501 * -ESRCH indicates VMA cleared. The target is dying.
2503 return_error_param
= PTR_ERR(t
->buffer
);
2504 return_error
= return_error_param
== -ESRCH
?
2505 BR_DEAD_REPLY
: BR_FAILED_REPLY
;
2506 return_error_line
= __LINE__
;
2508 goto err_binder_alloc_buf_failed
;
2510 t
->buffer
->allow_user_free
= 0;
2511 t
->buffer
->debug_id
= t
->debug_id
;
2512 t
->buffer
->transaction
= t
;
2513 t
->buffer
->target_node
= target_node
;
2514 trace_binder_transaction_alloc_buf(t
->buffer
);
2515 off_start
= (binder_size_t
*)(t
->buffer
->data
+
2516 ALIGN(tr
->data_size
, sizeof(void *)));
2519 if (copy_from_user(t
->buffer
->data
, (const void __user
*)(uintptr_t)
2520 tr
->data
.ptr
.buffer
, tr
->data_size
)) {
2521 binder_user_error("%d:%d got transaction with invalid data ptr\n",
2522 proc
->pid
, thread
->pid
);
2523 return_error
= BR_FAILED_REPLY
;
2524 return_error_param
= -EFAULT
;
2525 return_error_line
= __LINE__
;
2526 goto err_copy_data_failed
;
2528 if (copy_from_user(offp
, (const void __user
*)(uintptr_t)
2529 tr
->data
.ptr
.offsets
, tr
->offsets_size
)) {
2530 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2531 proc
->pid
, thread
->pid
);
2532 return_error
= BR_FAILED_REPLY
;
2533 return_error_param
= -EFAULT
;
2534 return_error_line
= __LINE__
;
2535 goto err_copy_data_failed
;
2537 if (!IS_ALIGNED(tr
->offsets_size
, sizeof(binder_size_t
))) {
2538 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2539 proc
->pid
, thread
->pid
, (u64
)tr
->offsets_size
);
2540 return_error
= BR_FAILED_REPLY
;
2541 return_error_param
= -EINVAL
;
2542 return_error_line
= __LINE__
;
2543 goto err_bad_offset
;
2545 if (!IS_ALIGNED(extra_buffers_size
, sizeof(u64
))) {
2546 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
2547 proc
->pid
, thread
->pid
,
2548 (u64
)extra_buffers_size
);
2549 return_error
= BR_FAILED_REPLY
;
2550 return_error_param
= -EINVAL
;
2551 return_error_line
= __LINE__
;
2552 goto err_bad_offset
;
2554 off_end
= (void *)off_start
+ tr
->offsets_size
;
2555 sg_bufp
= (u8
*)(PTR_ALIGN(off_end
, sizeof(void *)));
2556 sg_buf_end
= sg_bufp
+ extra_buffers_size
;
2558 for (; offp
< off_end
; offp
++) {
2559 struct binder_object_header
*hdr
;
2560 size_t object_size
= binder_validate_object(t
->buffer
, *offp
);
2562 if (object_size
== 0 || *offp
< off_min
) {
2563 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
2564 proc
->pid
, thread
->pid
, (u64
)*offp
,
2566 (u64
)t
->buffer
->data_size
);
2567 return_error
= BR_FAILED_REPLY
;
2568 return_error_param
= -EINVAL
;
2569 return_error_line
= __LINE__
;
2570 goto err_bad_offset
;
2573 hdr
= (struct binder_object_header
*)(t
->buffer
->data
+ *offp
);
2574 off_min
= *offp
+ object_size
;
2575 switch (hdr
->type
) {
2576 case BINDER_TYPE_BINDER
:
2577 case BINDER_TYPE_WEAK_BINDER
: {
2578 struct flat_binder_object
*fp
;
2580 fp
= to_flat_binder_object(hdr
);
2581 ret
= binder_translate_binder(fp
, t
, thread
);
2583 return_error
= BR_FAILED_REPLY
;
2584 return_error_param
= ret
;
2585 return_error_line
= __LINE__
;
2586 goto err_translate_failed
;
2589 case BINDER_TYPE_HANDLE
:
2590 case BINDER_TYPE_WEAK_HANDLE
: {
2591 struct flat_binder_object
*fp
;
2593 fp
= to_flat_binder_object(hdr
);
2594 ret
= binder_translate_handle(fp
, t
, thread
);
2596 return_error
= BR_FAILED_REPLY
;
2597 return_error_param
= ret
;
2598 return_error_line
= __LINE__
;
2599 goto err_translate_failed
;
2603 case BINDER_TYPE_FD
: {
2604 struct binder_fd_object
*fp
= to_binder_fd_object(hdr
);
2605 int target_fd
= binder_translate_fd(fp
->fd
, t
, thread
,
2608 if (target_fd
< 0) {
2609 return_error
= BR_FAILED_REPLY
;
2610 return_error_param
= target_fd
;
2611 return_error_line
= __LINE__
;
2612 goto err_translate_failed
;
2617 case BINDER_TYPE_FDA
: {
2618 struct binder_fd_array_object
*fda
=
2619 to_binder_fd_array_object(hdr
);
2620 struct binder_buffer_object
*parent
=
2621 binder_validate_ptr(t
->buffer
, fda
->parent
,
2625 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2626 proc
->pid
, thread
->pid
);
2627 return_error
= BR_FAILED_REPLY
;
2628 return_error_param
= -EINVAL
;
2629 return_error_line
= __LINE__
;
2630 goto err_bad_parent
;
2632 if (!binder_validate_fixup(t
->buffer
, off_start
,
2633 parent
, fda
->parent_offset
,
2635 last_fixup_min_off
)) {
2636 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2637 proc
->pid
, thread
->pid
);
2638 return_error
= BR_FAILED_REPLY
;
2639 return_error_param
= -EINVAL
;
2640 return_error_line
= __LINE__
;
2641 goto err_bad_parent
;
2643 ret
= binder_translate_fd_array(fda
, parent
, t
, thread
,
2646 return_error
= BR_FAILED_REPLY
;
2647 return_error_param
= ret
;
2648 return_error_line
= __LINE__
;
2649 goto err_translate_failed
;
2651 last_fixup_obj
= parent
;
2652 last_fixup_min_off
=
2653 fda
->parent_offset
+ sizeof(u32
) * fda
->num_fds
;
2655 case BINDER_TYPE_PTR
: {
2656 struct binder_buffer_object
*bp
=
2657 to_binder_buffer_object(hdr
);
2658 size_t buf_left
= sg_buf_end
- sg_bufp
;
2660 if (bp
->length
> buf_left
) {
2661 binder_user_error("%d:%d got transaction with too large buffer\n",
2662 proc
->pid
, thread
->pid
);
2663 return_error
= BR_FAILED_REPLY
;
2664 return_error_param
= -EINVAL
;
2665 return_error_line
= __LINE__
;
2666 goto err_bad_offset
;
2668 if (copy_from_user(sg_bufp
,
2669 (const void __user
*)(uintptr_t)
2670 bp
->buffer
, bp
->length
)) {
2671 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2672 proc
->pid
, thread
->pid
);
2673 return_error_param
= -EFAULT
;
2674 return_error
= BR_FAILED_REPLY
;
2675 return_error_line
= __LINE__
;
2676 goto err_copy_data_failed
;
2678 /* Fixup buffer pointer to target proc address space */
2679 bp
->buffer
= (uintptr_t)sg_bufp
+
2680 binder_alloc_get_user_buffer_offset(
2681 &target_proc
->alloc
);
2682 sg_bufp
+= ALIGN(bp
->length
, sizeof(u64
));
2684 ret
= binder_fixup_parent(t
, thread
, bp
, off_start
,
2687 last_fixup_min_off
);
2689 return_error
= BR_FAILED_REPLY
;
2690 return_error_param
= ret
;
2691 return_error_line
= __LINE__
;
2692 goto err_translate_failed
;
2694 last_fixup_obj
= bp
;
2695 last_fixup_min_off
= 0;
2698 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
2699 proc
->pid
, thread
->pid
, hdr
->type
);
2700 return_error
= BR_FAILED_REPLY
;
2701 return_error_param
= -EINVAL
;
2702 return_error_line
= __LINE__
;
2703 goto err_bad_object_type
;
2706 tcomplete
->type
= BINDER_WORK_TRANSACTION_COMPLETE
;
2707 binder_enqueue_work(proc
, tcomplete
, &thread
->todo
);
2710 if (target_thread
->is_dead
)
2711 goto err_dead_proc_or_thread
;
2712 BUG_ON(t
->buffer
->async_transaction
!= 0);
2713 binder_pop_transaction(target_thread
, in_reply_to
);
2714 binder_free_transaction(in_reply_to
);
2715 } else if (!(t
->flags
& TF_ONE_WAY
)) {
2716 BUG_ON(t
->buffer
->async_transaction
!= 0);
2718 t
->from_parent
= thread
->transaction_stack
;
2719 thread
->transaction_stack
= t
;
2720 if (target_proc
->is_dead
||
2721 (target_thread
&& target_thread
->is_dead
)) {
2722 binder_pop_transaction(thread
, t
);
2723 goto err_dead_proc_or_thread
;
2726 BUG_ON(target_node
== NULL
);
2727 BUG_ON(t
->buffer
->async_transaction
!= 1);
2728 if (target_node
->has_async_transaction
) {
2729 target_list
= &target_node
->async_todo
;
2732 target_node
->has_async_transaction
= 1;
2733 if (target_proc
->is_dead
||
2734 (target_thread
&& target_thread
->is_dead
))
2735 goto err_dead_proc_or_thread
;
2737 t
->work
.type
= BINDER_WORK_TRANSACTION
;
2738 binder_enqueue_work(target_proc
, &t
->work
, target_list
);
2740 if (reply
|| !(tr
->flags
& TF_ONE_WAY
))
2741 wake_up_interruptible_sync(target_wait
);
2743 wake_up_interruptible(target_wait
);
2746 binder_thread_dec_tmpref(target_thread
);
2747 binder_proc_dec_tmpref(target_proc
);
2749 * write barrier to synchronize with initialization
2753 WRITE_ONCE(e
->debug_id_done
, t_debug_id
);
2756 err_dead_proc_or_thread
:
2757 return_error
= BR_DEAD_REPLY
;
2758 return_error_line
= __LINE__
;
2759 err_translate_failed
:
2760 err_bad_object_type
:
2763 err_copy_data_failed
:
2764 trace_binder_transaction_failed_buffer_release(t
->buffer
);
2765 binder_transaction_buffer_release(target_proc
, t
->buffer
, offp
);
2767 t
->buffer
->transaction
= NULL
;
2768 binder_alloc_free_buf(&target_proc
->alloc
, t
->buffer
);
2769 err_binder_alloc_buf_failed
:
2771 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE
);
2772 err_alloc_tcomplete_failed
:
2774 binder_stats_deleted(BINDER_STAT_TRANSACTION
);
2777 err_empty_call_stack
:
2779 err_invalid_target_handle
:
2780 err_no_context_mgr_node
:
2782 binder_thread_dec_tmpref(target_thread
);
2784 binder_proc_dec_tmpref(target_proc
);
2786 binder_dec_node(target_node
, 1, 0);
2788 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION
,
2789 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
2790 proc
->pid
, thread
->pid
, return_error
, return_error_param
,
2791 (u64
)tr
->data_size
, (u64
)tr
->offsets_size
,
2795 struct binder_transaction_log_entry
*fe
;
2797 e
->return_error
= return_error
;
2798 e
->return_error_param
= return_error_param
;
2799 e
->return_error_line
= return_error_line
;
2800 fe
= binder_transaction_log_add(&binder_transaction_log_failed
);
2803 * write barrier to synchronize with initialization
2807 WRITE_ONCE(e
->debug_id_done
, t_debug_id
);
2808 WRITE_ONCE(fe
->debug_id_done
, t_debug_id
);
2811 BUG_ON(thread
->return_error
.cmd
!= BR_OK
);
2813 thread
->return_error
.cmd
= BR_TRANSACTION_COMPLETE
;
2814 binder_enqueue_work(thread
->proc
,
2815 &thread
->return_error
.work
,
2817 binder_send_failed_reply(in_reply_to
, return_error
);
2819 thread
->return_error
.cmd
= return_error
;
2820 binder_enqueue_work(thread
->proc
,
2821 &thread
->return_error
.work
,
2826 static int binder_thread_write(struct binder_proc
*proc
,
2827 struct binder_thread
*thread
,
2828 binder_uintptr_t binder_buffer
, size_t size
,
2829 binder_size_t
*consumed
)
2832 struct binder_context
*context
= proc
->context
;
2833 void __user
*buffer
= (void __user
*)(uintptr_t)binder_buffer
;
2834 void __user
*ptr
= buffer
+ *consumed
;
2835 void __user
*end
= buffer
+ size
;
2837 while (ptr
< end
&& thread
->return_error
.cmd
== BR_OK
) {
2840 if (get_user(cmd
, (uint32_t __user
*)ptr
))
2842 ptr
+= sizeof(uint32_t);
2843 trace_binder_command(cmd
);
2844 if (_IOC_NR(cmd
) < ARRAY_SIZE(binder_stats
.bc
)) {
2845 atomic_inc(&binder_stats
.bc
[_IOC_NR(cmd
)]);
2846 atomic_inc(&proc
->stats
.bc
[_IOC_NR(cmd
)]);
2847 atomic_inc(&thread
->stats
.bc
[_IOC_NR(cmd
)]);
2855 const char *debug_string
;
2856 bool strong
= cmd
== BC_ACQUIRE
|| cmd
== BC_RELEASE
;
2857 bool increment
= cmd
== BC_INCREFS
|| cmd
== BC_ACQUIRE
;
2858 struct binder_ref_data rdata
;
2860 if (get_user(target
, (uint32_t __user
*)ptr
))
2863 ptr
+= sizeof(uint32_t);
2865 if (increment
&& !target
) {
2866 struct binder_node
*ctx_mgr_node
;
2867 mutex_lock(&context
->context_mgr_node_lock
);
2868 ctx_mgr_node
= context
->binder_context_mgr_node
;
2870 ret
= binder_inc_ref_for_node(
2872 strong
, NULL
, &rdata
);
2873 mutex_unlock(&context
->context_mgr_node_lock
);
2876 ret
= binder_update_ref_for_handle(
2877 proc
, target
, increment
, strong
,
2879 if (!ret
&& rdata
.desc
!= target
) {
2880 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
2881 proc
->pid
, thread
->pid
,
2882 target
, rdata
.desc
);
2886 debug_string
= "IncRefs";
2889 debug_string
= "Acquire";
2892 debug_string
= "Release";
2896 debug_string
= "DecRefs";
2900 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
2901 proc
->pid
, thread
->pid
, debug_string
,
2902 strong
, target
, ret
);
2905 binder_debug(BINDER_DEBUG_USER_REFS
,
2906 "%d:%d %s ref %d desc %d s %d w %d\n",
2907 proc
->pid
, thread
->pid
, debug_string
,
2908 rdata
.debug_id
, rdata
.desc
, rdata
.strong
,
2912 case BC_INCREFS_DONE
:
2913 case BC_ACQUIRE_DONE
: {
2914 binder_uintptr_t node_ptr
;
2915 binder_uintptr_t cookie
;
2916 struct binder_node
*node
;
2918 if (get_user(node_ptr
, (binder_uintptr_t __user
*)ptr
))
2920 ptr
+= sizeof(binder_uintptr_t
);
2921 if (get_user(cookie
, (binder_uintptr_t __user
*)ptr
))
2923 ptr
+= sizeof(binder_uintptr_t
);
2924 node
= binder_get_node(proc
, node_ptr
);
2926 binder_user_error("%d:%d %s u%016llx no match\n",
2927 proc
->pid
, thread
->pid
,
2928 cmd
== BC_INCREFS_DONE
?
2934 if (cookie
!= node
->cookie
) {
2935 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
2936 proc
->pid
, thread
->pid
,
2937 cmd
== BC_INCREFS_DONE
?
2938 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
2939 (u64
)node_ptr
, node
->debug_id
,
2940 (u64
)cookie
, (u64
)node
->cookie
);
2941 binder_put_node(node
);
2944 binder_inner_proc_lock(proc
);
2945 if (cmd
== BC_ACQUIRE_DONE
) {
2946 if (node
->pending_strong_ref
== 0) {
2947 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
2948 proc
->pid
, thread
->pid
,
2950 binder_inner_proc_unlock(proc
);
2951 binder_put_node(node
);
2954 node
->pending_strong_ref
= 0;
2956 if (node
->pending_weak_ref
== 0) {
2957 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
2958 proc
->pid
, thread
->pid
,
2960 binder_inner_proc_unlock(proc
);
2961 binder_put_node(node
);
2964 node
->pending_weak_ref
= 0;
2966 binder_inner_proc_unlock(proc
);
2967 binder_dec_node(node
, cmd
== BC_ACQUIRE_DONE
, 0);
2968 binder_debug(BINDER_DEBUG_USER_REFS
,
2969 "%d:%d %s node %d ls %d lw %d tr %d\n",
2970 proc
->pid
, thread
->pid
,
2971 cmd
== BC_INCREFS_DONE
? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
2972 node
->debug_id
, node
->local_strong_refs
,
2973 node
->local_weak_refs
, node
->tmp_refs
);
2974 binder_put_node(node
);
2977 case BC_ATTEMPT_ACQUIRE
:
2978 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
2980 case BC_ACQUIRE_RESULT
:
2981 pr_err("BC_ACQUIRE_RESULT not supported\n");
2984 case BC_FREE_BUFFER
: {
2985 binder_uintptr_t data_ptr
;
2986 struct binder_buffer
*buffer
;
2988 if (get_user(data_ptr
, (binder_uintptr_t __user
*)ptr
))
2990 ptr
+= sizeof(binder_uintptr_t
);
2992 buffer
= binder_alloc_prepare_to_free(&proc
->alloc
,
2994 if (buffer
== NULL
) {
2995 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
2996 proc
->pid
, thread
->pid
, (u64
)data_ptr
);
2999 if (!buffer
->allow_user_free
) {
3000 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
3001 proc
->pid
, thread
->pid
, (u64
)data_ptr
);
3004 binder_debug(BINDER_DEBUG_FREE_BUFFER
,
3005 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3006 proc
->pid
, thread
->pid
, (u64
)data_ptr
,
3008 buffer
->transaction
? "active" : "finished");
3010 if (buffer
->transaction
) {
3011 buffer
->transaction
->buffer
= NULL
;
3012 buffer
->transaction
= NULL
;
3014 if (buffer
->async_transaction
&& buffer
->target_node
) {
3015 struct binder_node
*buf_node
;
3016 struct binder_work
*w
;
3018 buf_node
= buffer
->target_node
;
3019 BUG_ON(!buf_node
->has_async_transaction
);
3020 BUG_ON(buf_node
->proc
!= proc
);
3021 binder_inner_proc_lock(proc
);
3022 w
= binder_dequeue_work_head_ilocked(
3023 &buf_node
->async_todo
);
3025 buf_node
->has_async_transaction
= 0;
3027 binder_enqueue_work_ilocked(
3029 binder_inner_proc_unlock(proc
);
3031 trace_binder_transaction_buffer_release(buffer
);
3032 binder_transaction_buffer_release(proc
, buffer
, NULL
);
3033 binder_alloc_free_buf(&proc
->alloc
, buffer
);
3037 case BC_TRANSACTION_SG
:
3039 struct binder_transaction_data_sg tr
;
3041 if (copy_from_user(&tr
, ptr
, sizeof(tr
)))
3044 binder_transaction(proc
, thread
, &tr
.transaction_data
,
3045 cmd
== BC_REPLY_SG
, tr
.buffers_size
);
3048 case BC_TRANSACTION
:
3050 struct binder_transaction_data tr
;
3052 if (copy_from_user(&tr
, ptr
, sizeof(tr
)))
3055 binder_transaction(proc
, thread
, &tr
,
3056 cmd
== BC_REPLY
, 0);
3060 case BC_REGISTER_LOOPER
:
3061 binder_debug(BINDER_DEBUG_THREADS
,
3062 "%d:%d BC_REGISTER_LOOPER\n",
3063 proc
->pid
, thread
->pid
);
3064 if (thread
->looper
& BINDER_LOOPER_STATE_ENTERED
) {
3065 thread
->looper
|= BINDER_LOOPER_STATE_INVALID
;
3066 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3067 proc
->pid
, thread
->pid
);
3068 } else if (proc
->requested_threads
== 0) {
3069 thread
->looper
|= BINDER_LOOPER_STATE_INVALID
;
3070 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3071 proc
->pid
, thread
->pid
);
3073 proc
->requested_threads
--;
3074 proc
->requested_threads_started
++;
3076 thread
->looper
|= BINDER_LOOPER_STATE_REGISTERED
;
3078 case BC_ENTER_LOOPER
:
3079 binder_debug(BINDER_DEBUG_THREADS
,
3080 "%d:%d BC_ENTER_LOOPER\n",
3081 proc
->pid
, thread
->pid
);
3082 if (thread
->looper
& BINDER_LOOPER_STATE_REGISTERED
) {
3083 thread
->looper
|= BINDER_LOOPER_STATE_INVALID
;
3084 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3085 proc
->pid
, thread
->pid
);
3087 thread
->looper
|= BINDER_LOOPER_STATE_ENTERED
;
3089 case BC_EXIT_LOOPER
:
3090 binder_debug(BINDER_DEBUG_THREADS
,
3091 "%d:%d BC_EXIT_LOOPER\n",
3092 proc
->pid
, thread
->pid
);
3093 thread
->looper
|= BINDER_LOOPER_STATE_EXITED
;
3096 case BC_REQUEST_DEATH_NOTIFICATION
:
3097 case BC_CLEAR_DEATH_NOTIFICATION
: {
3099 binder_uintptr_t cookie
;
3100 struct binder_ref
*ref
;
3101 struct binder_ref_death
*death
;
3103 if (get_user(target
, (uint32_t __user
*)ptr
))
3105 ptr
+= sizeof(uint32_t);
3106 if (get_user(cookie
, (binder_uintptr_t __user
*)ptr
))
3108 ptr
+= sizeof(binder_uintptr_t
);
3109 ref
= binder_get_ref(proc
, target
, false);
3111 binder_user_error("%d:%d %s invalid ref %d\n",
3112 proc
->pid
, thread
->pid
,
3113 cmd
== BC_REQUEST_DEATH_NOTIFICATION
?
3114 "BC_REQUEST_DEATH_NOTIFICATION" :
3115 "BC_CLEAR_DEATH_NOTIFICATION",
3120 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION
,
3121 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3122 proc
->pid
, thread
->pid
,
3123 cmd
== BC_REQUEST_DEATH_NOTIFICATION
?
3124 "BC_REQUEST_DEATH_NOTIFICATION" :
3125 "BC_CLEAR_DEATH_NOTIFICATION",
3126 (u64
)cookie
, ref
->data
.debug_id
,
3127 ref
->data
.desc
, ref
->data
.strong
,
3128 ref
->data
.weak
, ref
->node
->debug_id
);
3130 if (cmd
== BC_REQUEST_DEATH_NOTIFICATION
) {
3132 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3133 proc
->pid
, thread
->pid
);
3136 death
= kzalloc(sizeof(*death
), GFP_KERNEL
);
3137 if (death
== NULL
) {
3138 WARN_ON(thread
->return_error
.cmd
!=
3140 thread
->return_error
.cmd
= BR_ERROR
;
3141 binder_enqueue_work(
3143 &thread
->return_error
.work
,
3145 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION
,
3146 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3147 proc
->pid
, thread
->pid
);
3150 binder_stats_created(BINDER_STAT_DEATH
);
3151 INIT_LIST_HEAD(&death
->work
.entry
);
3152 death
->cookie
= cookie
;
3154 if (ref
->node
->proc
== NULL
) {
3155 ref
->death
->work
.type
= BINDER_WORK_DEAD_BINDER
;
3156 if (thread
->looper
&
3157 (BINDER_LOOPER_STATE_REGISTERED
|
3158 BINDER_LOOPER_STATE_ENTERED
))
3159 binder_enqueue_work(
3164 binder_enqueue_work(
3168 wake_up_interruptible(
3173 if (ref
->death
== NULL
) {
3174 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3175 proc
->pid
, thread
->pid
);
3179 if (death
->cookie
!= cookie
) {
3180 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3181 proc
->pid
, thread
->pid
,
3187 binder_inner_proc_lock(proc
);
3188 if (list_empty(&death
->work
.entry
)) {
3189 death
->work
.type
= BINDER_WORK_CLEAR_DEATH_NOTIFICATION
;
3190 if (thread
->looper
&
3191 (BINDER_LOOPER_STATE_REGISTERED
|
3192 BINDER_LOOPER_STATE_ENTERED
))
3193 binder_enqueue_work_ilocked(
3197 binder_enqueue_work_ilocked(
3200 wake_up_interruptible(
3204 BUG_ON(death
->work
.type
!= BINDER_WORK_DEAD_BINDER
);
3205 death
->work
.type
= BINDER_WORK_DEAD_BINDER_AND_CLEAR
;
3207 binder_inner_proc_unlock(proc
);
3210 case BC_DEAD_BINDER_DONE
: {
3211 struct binder_work
*w
;
3212 binder_uintptr_t cookie
;
3213 struct binder_ref_death
*death
= NULL
;
3215 if (get_user(cookie
, (binder_uintptr_t __user
*)ptr
))
3218 ptr
+= sizeof(cookie
);
3219 binder_inner_proc_lock(proc
);
3220 list_for_each_entry(w
, &proc
->delivered_death
,
3222 struct binder_ref_death
*tmp_death
=
3224 struct binder_ref_death
,
3227 if (tmp_death
->cookie
== cookie
) {
3232 binder_debug(BINDER_DEBUG_DEAD_BINDER
,
3233 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
3234 proc
->pid
, thread
->pid
, (u64
)cookie
,
3236 if (death
== NULL
) {
3237 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3238 proc
->pid
, thread
->pid
, (u64
)cookie
);
3239 binder_inner_proc_unlock(proc
);
3242 binder_dequeue_work_ilocked(&death
->work
);
3243 if (death
->work
.type
== BINDER_WORK_DEAD_BINDER_AND_CLEAR
) {
3244 death
->work
.type
= BINDER_WORK_CLEAR_DEATH_NOTIFICATION
;
3245 if (thread
->looper
&
3246 (BINDER_LOOPER_STATE_REGISTERED
|
3247 BINDER_LOOPER_STATE_ENTERED
))
3248 binder_enqueue_work_ilocked(
3249 &death
->work
, &thread
->todo
);
3251 binder_enqueue_work_ilocked(
3254 wake_up_interruptible(&proc
->wait
);
3257 binder_inner_proc_unlock(proc
);
3261 pr_err("%d:%d unknown command %d\n",
3262 proc
->pid
, thread
->pid
, cmd
);
3265 *consumed
= ptr
- buffer
;
3270 static void binder_stat_br(struct binder_proc
*proc
,
3271 struct binder_thread
*thread
, uint32_t cmd
)
3273 trace_binder_return(cmd
);
3274 if (_IOC_NR(cmd
) < ARRAY_SIZE(binder_stats
.br
)) {
3275 atomic_inc(&binder_stats
.br
[_IOC_NR(cmd
)]);
3276 atomic_inc(&proc
->stats
.br
[_IOC_NR(cmd
)]);
3277 atomic_inc(&thread
->stats
.br
[_IOC_NR(cmd
)]);
3281 static int binder_has_proc_work(struct binder_proc
*proc
,
3282 struct binder_thread
*thread
)
3284 return !binder_worklist_empty(proc
, &proc
->todo
) ||
3285 thread
->looper_need_return
;
3288 static int binder_has_thread_work(struct binder_thread
*thread
)
3290 return !binder_worklist_empty(thread
->proc
, &thread
->todo
) ||
3291 thread
->looper_need_return
;
3294 static int binder_put_node_cmd(struct binder_proc
*proc
,
3295 struct binder_thread
*thread
,
3297 binder_uintptr_t node_ptr
,
3298 binder_uintptr_t node_cookie
,
3300 uint32_t cmd
, const char *cmd_name
)
3302 void __user
*ptr
= *ptrp
;
3304 if (put_user(cmd
, (uint32_t __user
*)ptr
))
3306 ptr
+= sizeof(uint32_t);
3308 if (put_user(node_ptr
, (binder_uintptr_t __user
*)ptr
))
3310 ptr
+= sizeof(binder_uintptr_t
);
3312 if (put_user(node_cookie
, (binder_uintptr_t __user
*)ptr
))
3314 ptr
+= sizeof(binder_uintptr_t
);
3316 binder_stat_br(proc
, thread
, cmd
);
3317 binder_debug(BINDER_DEBUG_USER_REFS
, "%d:%d %s %d u%016llx c%016llx\n",
3318 proc
->pid
, thread
->pid
, cmd_name
, node_debug_id
,
3319 (u64
)node_ptr
, (u64
)node_cookie
);
3325 static int binder_thread_read(struct binder_proc
*proc
,
3326 struct binder_thread
*thread
,
3327 binder_uintptr_t binder_buffer
, size_t size
,
3328 binder_size_t
*consumed
, int non_block
)
3330 void __user
*buffer
= (void __user
*)(uintptr_t)binder_buffer
;
3331 void __user
*ptr
= buffer
+ *consumed
;
3332 void __user
*end
= buffer
+ size
;
3335 int wait_for_proc_work
;
3337 if (*consumed
== 0) {
3338 if (put_user(BR_NOOP
, (uint32_t __user
*)ptr
))
3340 ptr
+= sizeof(uint32_t);
3344 wait_for_proc_work
= thread
->transaction_stack
== NULL
&&
3345 binder_worklist_empty(proc
, &thread
->todo
);
3347 thread
->looper
|= BINDER_LOOPER_STATE_WAITING
;
3348 if (wait_for_proc_work
)
3349 proc
->ready_threads
++;
3351 binder_unlock(__func__
);
3353 trace_binder_wait_for_work(wait_for_proc_work
,
3354 !!thread
->transaction_stack
,
3355 !binder_worklist_empty(proc
, &thread
->todo
));
3356 if (wait_for_proc_work
) {
3357 if (!(thread
->looper
& (BINDER_LOOPER_STATE_REGISTERED
|
3358 BINDER_LOOPER_STATE_ENTERED
))) {
3359 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
3360 proc
->pid
, thread
->pid
, thread
->looper
);
3361 wait_event_interruptible(binder_user_error_wait
,
3362 binder_stop_on_user_error
< 2);
3364 binder_set_nice(proc
->default_priority
);
3366 if (!binder_has_proc_work(proc
, thread
))
3369 ret
= wait_event_freezable_exclusive(proc
->wait
, binder_has_proc_work(proc
, thread
));
3372 if (!binder_has_thread_work(thread
))
3375 ret
= wait_event_freezable(thread
->wait
, binder_has_thread_work(thread
));
3378 binder_lock(__func__
);
3380 if (wait_for_proc_work
)
3381 proc
->ready_threads
--;
3382 thread
->looper
&= ~BINDER_LOOPER_STATE_WAITING
;
3389 struct binder_transaction_data tr
;
3390 struct binder_work
*w
= NULL
;
3391 struct list_head
*list
= NULL
;
3392 struct binder_transaction
*t
= NULL
;
3393 struct binder_thread
*t_from
;
3395 binder_inner_proc_lock(proc
);
3396 if (!binder_worklist_empty_ilocked(&thread
->todo
))
3397 list
= &thread
->todo
;
3398 else if (!binder_worklist_empty_ilocked(&proc
->todo
) &&
3402 binder_inner_proc_unlock(proc
);
3405 if (ptr
- buffer
== 4 && !thread
->looper_need_return
)
3410 if (end
- ptr
< sizeof(tr
) + 4) {
3411 binder_inner_proc_unlock(proc
);
3414 w
= binder_dequeue_work_head_ilocked(list
);
3417 case BINDER_WORK_TRANSACTION
: {
3418 binder_inner_proc_unlock(proc
);
3419 t
= container_of(w
, struct binder_transaction
, work
);
3421 case BINDER_WORK_RETURN_ERROR
: {
3422 struct binder_error
*e
= container_of(
3423 w
, struct binder_error
, work
);
3425 WARN_ON(e
->cmd
== BR_OK
);
3426 binder_inner_proc_unlock(proc
);
3427 if (put_user(e
->cmd
, (uint32_t __user
*)ptr
))
3430 ptr
+= sizeof(uint32_t);
3432 binder_stat_br(proc
, thread
, cmd
);
3434 case BINDER_WORK_TRANSACTION_COMPLETE
: {
3435 binder_inner_proc_unlock(proc
);
3436 cmd
= BR_TRANSACTION_COMPLETE
;
3437 if (put_user(cmd
, (uint32_t __user
*)ptr
))
3439 ptr
+= sizeof(uint32_t);
3441 binder_stat_br(proc
, thread
, cmd
);
3442 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE
,
3443 "%d:%d BR_TRANSACTION_COMPLETE\n",
3444 proc
->pid
, thread
->pid
);
3446 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE
);
3448 case BINDER_WORK_NODE
: {
3449 struct binder_node
*node
= container_of(w
, struct binder_node
, work
);
3451 binder_uintptr_t node_ptr
= node
->ptr
;
3452 binder_uintptr_t node_cookie
= node
->cookie
;
3453 int node_debug_id
= node
->debug_id
;
3456 void __user
*orig_ptr
= ptr
;
3458 BUG_ON(proc
!= node
->proc
);
3459 strong
= node
->internal_strong_refs
||
3460 node
->local_strong_refs
;
3461 weak
= !hlist_empty(&node
->refs
) ||
3462 node
->local_weak_refs
||
3463 node
->tmp_refs
|| strong
;
3464 has_strong_ref
= node
->has_strong_ref
;
3465 has_weak_ref
= node
->has_weak_ref
;
3467 if (weak
&& !has_weak_ref
) {
3468 node
->has_weak_ref
= 1;
3469 node
->pending_weak_ref
= 1;
3470 node
->local_weak_refs
++;
3472 if (strong
&& !has_strong_ref
) {
3473 node
->has_strong_ref
= 1;
3474 node
->pending_strong_ref
= 1;
3475 node
->local_strong_refs
++;
3477 if (!strong
&& has_strong_ref
)
3478 node
->has_strong_ref
= 0;
3479 if (!weak
&& has_weak_ref
)
3480 node
->has_weak_ref
= 0;
3481 if (!weak
&& !strong
) {
3482 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
3483 "%d:%d node %d u%016llx c%016llx deleted\n",
3484 proc
->pid
, thread
->pid
,
3488 rb_erase(&node
->rb_node
, &proc
->nodes
);
3489 binder_inner_proc_unlock(proc
);
3490 binder_free_node(node
);
3492 binder_inner_proc_unlock(proc
);
3494 if (weak
&& !has_weak_ref
)
3495 ret
= binder_put_node_cmd(
3496 proc
, thread
, &ptr
, node_ptr
,
3497 node_cookie
, node_debug_id
,
3498 BR_INCREFS
, "BR_INCREFS");
3499 if (!ret
&& strong
&& !has_strong_ref
)
3500 ret
= binder_put_node_cmd(
3501 proc
, thread
, &ptr
, node_ptr
,
3502 node_cookie
, node_debug_id
,
3503 BR_ACQUIRE
, "BR_ACQUIRE");
3504 if (!ret
&& !strong
&& has_strong_ref
)
3505 ret
= binder_put_node_cmd(
3506 proc
, thread
, &ptr
, node_ptr
,
3507 node_cookie
, node_debug_id
,
3508 BR_RELEASE
, "BR_RELEASE");
3509 if (!ret
&& !weak
&& has_weak_ref
)
3510 ret
= binder_put_node_cmd(
3511 proc
, thread
, &ptr
, node_ptr
,
3512 node_cookie
, node_debug_id
,
3513 BR_DECREFS
, "BR_DECREFS");
3514 if (orig_ptr
== ptr
)
3515 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
3516 "%d:%d node %d u%016llx c%016llx state unchanged\n",
3517 proc
->pid
, thread
->pid
,
3524 case BINDER_WORK_DEAD_BINDER
:
3525 case BINDER_WORK_DEAD_BINDER_AND_CLEAR
:
3526 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION
: {
3527 struct binder_ref_death
*death
;
3530 death
= container_of(w
, struct binder_ref_death
, work
);
3531 if (w
->type
== BINDER_WORK_CLEAR_DEATH_NOTIFICATION
)
3532 cmd
= BR_CLEAR_DEATH_NOTIFICATION_DONE
;
3534 cmd
= BR_DEAD_BINDER
;
3536 * TODO: there is a race condition between
3537 * death notification requests and delivery
3538 * of the notifications. This will be handled
3541 binder_inner_proc_unlock(proc
);
3542 if (put_user(cmd
, (uint32_t __user
*)ptr
))
3544 ptr
+= sizeof(uint32_t);
3545 if (put_user(death
->cookie
,
3546 (binder_uintptr_t __user
*)ptr
))
3548 ptr
+= sizeof(binder_uintptr_t
);
3549 binder_stat_br(proc
, thread
, cmd
);
3550 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION
,
3551 "%d:%d %s %016llx\n",
3552 proc
->pid
, thread
->pid
,
3553 cmd
== BR_DEAD_BINDER
?
3555 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
3556 (u64
)death
->cookie
);
3558 if (w
->type
== BINDER_WORK_CLEAR_DEATH_NOTIFICATION
) {
3560 binder_stats_deleted(BINDER_STAT_DEATH
);
3562 binder_inner_proc_lock(proc
);
3563 binder_enqueue_work_ilocked(
3564 w
, &proc
->delivered_death
);
3565 binder_inner_proc_unlock(proc
);
3567 if (cmd
== BR_DEAD_BINDER
)
3568 goto done
; /* DEAD_BINDER notifications can cause transactions */
3575 BUG_ON(t
->buffer
== NULL
);
3576 if (t
->buffer
->target_node
) {
3577 struct binder_node
*target_node
= t
->buffer
->target_node
;
3579 tr
.target
.ptr
= target_node
->ptr
;
3580 tr
.cookie
= target_node
->cookie
;
3581 t
->saved_priority
= task_nice(current
);
3582 if (t
->priority
< target_node
->min_priority
&&
3583 !(t
->flags
& TF_ONE_WAY
))
3584 binder_set_nice(t
->priority
);
3585 else if (!(t
->flags
& TF_ONE_WAY
) ||
3586 t
->saved_priority
> target_node
->min_priority
)
3587 binder_set_nice(target_node
->min_priority
);
3588 cmd
= BR_TRANSACTION
;
3595 tr
.flags
= t
->flags
;
3596 tr
.sender_euid
= from_kuid(current_user_ns(), t
->sender_euid
);
3598 t_from
= binder_get_txn_from(t
);
3600 struct task_struct
*sender
= t_from
->proc
->tsk
;
3602 tr
.sender_pid
= task_tgid_nr_ns(sender
,
3603 task_active_pid_ns(current
));
3608 tr
.data_size
= t
->buffer
->data_size
;
3609 tr
.offsets_size
= t
->buffer
->offsets_size
;
3610 tr
.data
.ptr
.buffer
= (binder_uintptr_t
)
3611 ((uintptr_t)t
->buffer
->data
+
3612 binder_alloc_get_user_buffer_offset(&proc
->alloc
));
3613 tr
.data
.ptr
.offsets
= tr
.data
.ptr
.buffer
+
3614 ALIGN(t
->buffer
->data_size
,
3617 if (put_user(cmd
, (uint32_t __user
*)ptr
)) {
3619 binder_thread_dec_tmpref(t_from
);
3622 ptr
+= sizeof(uint32_t);
3623 if (copy_to_user(ptr
, &tr
, sizeof(tr
))) {
3625 binder_thread_dec_tmpref(t_from
);
3630 trace_binder_transaction_received(t
);
3631 binder_stat_br(proc
, thread
, cmd
);
3632 binder_debug(BINDER_DEBUG_TRANSACTION
,
3633 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
3634 proc
->pid
, thread
->pid
,
3635 (cmd
== BR_TRANSACTION
) ? "BR_TRANSACTION" :
3637 t
->debug_id
, t_from
? t_from
->proc
->pid
: 0,
3638 t_from
? t_from
->pid
: 0, cmd
,
3639 t
->buffer
->data_size
, t
->buffer
->offsets_size
,
3640 (u64
)tr
.data
.ptr
.buffer
, (u64
)tr
.data
.ptr
.offsets
);
3643 binder_thread_dec_tmpref(t_from
);
3644 t
->buffer
->allow_user_free
= 1;
3645 if (cmd
== BR_TRANSACTION
&& !(t
->flags
& TF_ONE_WAY
)) {
3646 t
->to_parent
= thread
->transaction_stack
;
3647 t
->to_thread
= thread
;
3648 thread
->transaction_stack
= t
;
3650 binder_free_transaction(t
);
3657 *consumed
= ptr
- buffer
;
3658 if (proc
->requested_threads
+ proc
->ready_threads
== 0 &&
3659 proc
->requested_threads_started
< proc
->max_threads
&&
3660 (thread
->looper
& (BINDER_LOOPER_STATE_REGISTERED
|
3661 BINDER_LOOPER_STATE_ENTERED
)) /* the user-space code fails to */
3662 /*spawn a new thread if we leave this out */) {
3663 proc
->requested_threads
++;
3664 binder_debug(BINDER_DEBUG_THREADS
,
3665 "%d:%d BR_SPAWN_LOOPER\n",
3666 proc
->pid
, thread
->pid
);
3667 if (put_user(BR_SPAWN_LOOPER
, (uint32_t __user
*)buffer
))
3669 binder_stat_br(proc
, thread
, BR_SPAWN_LOOPER
);
3674 static void binder_release_work(struct binder_proc
*proc
,
3675 struct list_head
*list
)
3677 struct binder_work
*w
;
3680 w
= binder_dequeue_work_head(proc
, list
);
3685 case BINDER_WORK_TRANSACTION
: {
3686 struct binder_transaction
*t
;
3688 t
= container_of(w
, struct binder_transaction
, work
);
3689 if (t
->buffer
->target_node
&&
3690 !(t
->flags
& TF_ONE_WAY
)) {
3691 binder_send_failed_reply(t
, BR_DEAD_REPLY
);
3693 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION
,
3694 "undelivered transaction %d\n",
3696 binder_free_transaction(t
);
3699 case BINDER_WORK_RETURN_ERROR
: {
3700 struct binder_error
*e
= container_of(
3701 w
, struct binder_error
, work
);
3703 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION
,
3704 "undelivered TRANSACTION_ERROR: %u\n",
3707 case BINDER_WORK_TRANSACTION_COMPLETE
: {
3708 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION
,
3709 "undelivered TRANSACTION_COMPLETE\n");
3711 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE
);
3713 case BINDER_WORK_DEAD_BINDER_AND_CLEAR
:
3714 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION
: {
3715 struct binder_ref_death
*death
;
3717 death
= container_of(w
, struct binder_ref_death
, work
);
3718 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION
,
3719 "undelivered death notification, %016llx\n",
3720 (u64
)death
->cookie
);
3722 binder_stats_deleted(BINDER_STAT_DEATH
);
3725 pr_err("unexpected work type, %d, not freed\n",
3733 static struct binder_thread
*binder_get_thread(struct binder_proc
*proc
)
3735 struct binder_thread
*thread
= NULL
;
3736 struct rb_node
*parent
= NULL
;
3737 struct rb_node
**p
= &proc
->threads
.rb_node
;
3741 thread
= rb_entry(parent
, struct binder_thread
, rb_node
);
3743 if (current
->pid
< thread
->pid
)
3745 else if (current
->pid
> thread
->pid
)
3746 p
= &(*p
)->rb_right
;
3751 thread
= kzalloc(sizeof(*thread
), GFP_KERNEL
);
3754 binder_stats_created(BINDER_STAT_THREAD
);
3755 thread
->proc
= proc
;
3756 thread
->pid
= current
->pid
;
3757 atomic_set(&thread
->tmp_ref
, 0);
3758 init_waitqueue_head(&thread
->wait
);
3759 INIT_LIST_HEAD(&thread
->todo
);
3760 rb_link_node(&thread
->rb_node
, parent
, p
);
3761 rb_insert_color(&thread
->rb_node
, &proc
->threads
);
3762 thread
->looper_need_return
= true;
3763 thread
->return_error
.work
.type
= BINDER_WORK_RETURN_ERROR
;
3764 thread
->return_error
.cmd
= BR_OK
;
3765 thread
->reply_error
.work
.type
= BINDER_WORK_RETURN_ERROR
;
3766 thread
->reply_error
.cmd
= BR_OK
;
/*
 * Final teardown of a binder_proc.
 * NOTE(review): lossy extraction -- the opening brace and any trailing
 * statements (e.g. a final kfree of proc) are not visible here; confirm
 * against the full file.
 */
3771 static void binder_free_proc(struct binder_proc
*proc
)
/* By this point no queued work or undelivered death notifications may remain. */
3773 BUG_ON(!list_empty(&proc
->todo
));
3774 BUG_ON(!list_empty(&proc
->delivered_death
));
/* Release the buffer-allocator state owned by this proc. */
3775 binder_alloc_deferred_release(&proc
->alloc
);
/* Drop the task reference taken in binder_open() (proc->tsk = group_leader). */
3776 put_task_struct(proc
->tsk
);
3777 binder_stats_deleted(BINDER_STAT_PROC
);
/*
 * Final teardown of a binder_thread.
 * NOTE(review): lossy extraction -- the closing statements (e.g. kfree of
 * thread) are not visible here; confirm against the full file.
 */
3781 static void binder_free_thread(struct binder_thread
*thread
)
/* The thread's todo list must already have been drained. */
3783 BUG_ON(!list_empty(&thread
->todo
));
3784 binder_stats_deleted(BINDER_STAT_THREAD
);
/*
 * Drop the proc tmpref taken in binder_thread_release() (its comment says
 * "the corresponding dec is when we actually free the thread").
 */
3785 binder_proc_dec_tmpref(thread
->proc
);
3789 static int binder_thread_release(struct binder_proc
*proc
,
3790 struct binder_thread
*thread
)
3792 struct binder_transaction
*t
;
3793 struct binder_transaction
*send_reply
= NULL
;
3794 int active_transactions
= 0;
3795 struct binder_transaction
*last_t
= NULL
;
3798 * take a ref on the proc so it survives
3799 * after we remove this thread from proc->threads.
3800 * The corresponding dec is when we actually
3801 * free the thread in binder_free_thread()
3805 * take a ref on this thread to ensure it
3806 * survives while we are releasing it
3808 atomic_inc(&thread
->tmp_ref
);
3809 rb_erase(&thread
->rb_node
, &proc
->threads
);
3810 t
= thread
->transaction_stack
;
3812 spin_lock(&t
->lock
);
3813 if (t
->to_thread
== thread
)
3816 thread
->is_dead
= true;
3820 active_transactions
++;
3821 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION
,
3822 "release %d:%d transaction %d %s, still active\n",
3823 proc
->pid
, thread
->pid
,
3825 (t
->to_thread
== thread
) ? "in" : "out");
3827 if (t
->to_thread
== thread
) {
3829 t
->to_thread
= NULL
;
3831 t
->buffer
->transaction
= NULL
;
3835 } else if (t
->from
== thread
) {
3840 spin_unlock(&last_t
->lock
);
3842 spin_lock(&t
->lock
);
3846 binder_send_failed_reply(send_reply
, BR_DEAD_REPLY
);
3847 binder_release_work(proc
, &thread
->todo
);
3848 binder_thread_dec_tmpref(thread
);
3849 return active_transactions
;
3852 static unsigned int binder_poll(struct file
*filp
,
3853 struct poll_table_struct
*wait
)
3855 struct binder_proc
*proc
= filp
->private_data
;
3856 struct binder_thread
*thread
= NULL
;
3857 int wait_for_proc_work
;
3859 binder_lock(__func__
);
3861 thread
= binder_get_thread(proc
);
3863 wait_for_proc_work
= thread
->transaction_stack
== NULL
&&
3864 binder_worklist_empty(proc
, &thread
->todo
);
3866 binder_unlock(__func__
);
3868 if (wait_for_proc_work
) {
3869 if (binder_has_proc_work(proc
, thread
))
3871 poll_wait(filp
, &proc
->wait
, wait
);
3872 if (binder_has_proc_work(proc
, thread
))
3875 if (binder_has_thread_work(thread
))
3877 poll_wait(filp
, &thread
->wait
, wait
);
3878 if (binder_has_thread_work(thread
))
3884 static int binder_ioctl_write_read(struct file
*filp
,
3885 unsigned int cmd
, unsigned long arg
,
3886 struct binder_thread
*thread
)
3889 struct binder_proc
*proc
= filp
->private_data
;
3890 unsigned int size
= _IOC_SIZE(cmd
);
3891 void __user
*ubuf
= (void __user
*)arg
;
3892 struct binder_write_read bwr
;
3894 if (size
!= sizeof(struct binder_write_read
)) {
3898 if (copy_from_user(&bwr
, ubuf
, sizeof(bwr
))) {
3902 binder_debug(BINDER_DEBUG_READ_WRITE
,
3903 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
3904 proc
->pid
, thread
->pid
,
3905 (u64
)bwr
.write_size
, (u64
)bwr
.write_buffer
,
3906 (u64
)bwr
.read_size
, (u64
)bwr
.read_buffer
);
3908 if (bwr
.write_size
> 0) {
3909 ret
= binder_thread_write(proc
, thread
,
3912 &bwr
.write_consumed
);
3913 trace_binder_write_done(ret
);
3915 bwr
.read_consumed
= 0;
3916 if (copy_to_user(ubuf
, &bwr
, sizeof(bwr
)))
3921 if (bwr
.read_size
> 0) {
3922 ret
= binder_thread_read(proc
, thread
, bwr
.read_buffer
,
3925 filp
->f_flags
& O_NONBLOCK
);
3926 trace_binder_read_done(ret
);
3927 if (!binder_worklist_empty(proc
, &proc
->todo
))
3928 wake_up_interruptible(&proc
->wait
);
3930 if (copy_to_user(ubuf
, &bwr
, sizeof(bwr
)))
3935 binder_debug(BINDER_DEBUG_READ_WRITE
,
3936 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
3937 proc
->pid
, thread
->pid
,
3938 (u64
)bwr
.write_consumed
, (u64
)bwr
.write_size
,
3939 (u64
)bwr
.read_consumed
, (u64
)bwr
.read_size
);
3940 if (copy_to_user(ubuf
, &bwr
, sizeof(bwr
))) {
3948 static int binder_ioctl_set_ctx_mgr(struct file
*filp
)
3951 struct binder_proc
*proc
= filp
->private_data
;
3952 struct binder_context
*context
= proc
->context
;
3953 struct binder_node
*new_node
;
3954 kuid_t curr_euid
= current_euid();
3956 mutex_lock(&context
->context_mgr_node_lock
);
3957 if (context
->binder_context_mgr_node
) {
3958 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
3962 ret
= security_binder_set_context_mgr(proc
->tsk
);
3965 if (uid_valid(context
->binder_context_mgr_uid
)) {
3966 if (!uid_eq(context
->binder_context_mgr_uid
, curr_euid
)) {
3967 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
3968 from_kuid(&init_user_ns
, curr_euid
),
3969 from_kuid(&init_user_ns
,
3970 context
->binder_context_mgr_uid
));
3975 context
->binder_context_mgr_uid
= curr_euid
;
3977 new_node
= binder_new_node(proc
, 0, 0);
3982 new_node
->local_weak_refs
++;
3983 new_node
->local_strong_refs
++;
3984 new_node
->has_strong_ref
= 1;
3985 new_node
->has_weak_ref
= 1;
3986 context
->binder_context_mgr_node
= new_node
;
3987 binder_put_node(new_node
);
3989 mutex_unlock(&context
->context_mgr_node_lock
);
3993 static long binder_ioctl(struct file
*filp
, unsigned int cmd
, unsigned long arg
)
3996 struct binder_proc
*proc
= filp
->private_data
;
3997 struct binder_thread
*thread
;
3998 unsigned int size
= _IOC_SIZE(cmd
);
3999 void __user
*ubuf
= (void __user
*)arg
;
4001 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4002 proc->pid, current->pid, cmd, arg);*/
4004 trace_binder_ioctl(cmd
, arg
);
4006 ret
= wait_event_interruptible(binder_user_error_wait
, binder_stop_on_user_error
< 2);
4010 binder_lock(__func__
);
4011 thread
= binder_get_thread(proc
);
4012 if (thread
== NULL
) {
4018 case BINDER_WRITE_READ
:
4019 ret
= binder_ioctl_write_read(filp
, cmd
, arg
, thread
);
4023 case BINDER_SET_MAX_THREADS
:
4024 if (copy_from_user(&proc
->max_threads
, ubuf
, sizeof(proc
->max_threads
))) {
4029 case BINDER_SET_CONTEXT_MGR
:
4030 ret
= binder_ioctl_set_ctx_mgr(filp
);
4034 case BINDER_THREAD_EXIT
:
4035 binder_debug(BINDER_DEBUG_THREADS
, "%d:%d exit\n",
4036 proc
->pid
, thread
->pid
);
4037 binder_thread_release(proc
, thread
);
4040 case BINDER_VERSION
: {
4041 struct binder_version __user
*ver
= ubuf
;
4043 if (size
!= sizeof(struct binder_version
)) {
4047 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION
,
4048 &ver
->protocol_version
)) {
4061 thread
->looper_need_return
= false;
4062 binder_unlock(__func__
);
4063 wait_event_interruptible(binder_user_error_wait
, binder_stop_on_user_error
< 2);
4064 if (ret
&& ret
!= -ERESTARTSYS
)
4065 pr_info("%d:%d ioctl %x %lx returned %d\n", proc
->pid
, current
->pid
, cmd
, arg
, ret
);
4067 trace_binder_ioctl_done(ret
);
4071 static void binder_vma_open(struct vm_area_struct
*vma
)
4073 struct binder_proc
*proc
= vma
->vm_private_data
;
4075 binder_debug(BINDER_DEBUG_OPEN_CLOSE
,
4076 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4077 proc
->pid
, vma
->vm_start
, vma
->vm_end
,
4078 (vma
->vm_end
- vma
->vm_start
) / SZ_1K
, vma
->vm_flags
,
4079 (unsigned long)pgprot_val(vma
->vm_page_prot
));
4082 static void binder_vma_close(struct vm_area_struct
*vma
)
4084 struct binder_proc
*proc
= vma
->vm_private_data
;
4086 binder_debug(BINDER_DEBUG_OPEN_CLOSE
,
4087 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4088 proc
->pid
, vma
->vm_start
, vma
->vm_end
,
4089 (vma
->vm_end
- vma
->vm_start
) / SZ_1K
, vma
->vm_flags
,
4090 (unsigned long)pgprot_val(vma
->vm_page_prot
));
4091 binder_alloc_vma_close(&proc
->alloc
);
4092 binder_defer_work(proc
, BINDER_DEFERRED_PUT_FILES
);
/* ->fault callback of binder_vm_ops: every fault on a binder VMA is
 * rejected with SIGBUS. */
4095 static int binder_vm_fault(struct vm_area_struct
*vma
, struct vm_fault
*vmf
)
4097 return VM_FAULT_SIGBUS
;
/* VMA callbacks installed on the binder mapping by binder_mmap(). */
4100 static const struct vm_operations_struct binder_vm_ops
= {
4101 .open
= binder_vma_open
,
4102 .close
= binder_vma_close
,
4103 .fault
= binder_vm_fault
,
4106 static int binder_mmap(struct file
*filp
, struct vm_area_struct
*vma
)
4109 struct binder_proc
*proc
= filp
->private_data
;
4110 const char *failure_string
;
4112 if (proc
->tsk
!= current
->group_leader
)
4115 if ((vma
->vm_end
- vma
->vm_start
) > SZ_4M
)
4116 vma
->vm_end
= vma
->vm_start
+ SZ_4M
;
4118 binder_debug(BINDER_DEBUG_OPEN_CLOSE
,
4119 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4120 __func__
, proc
->pid
, vma
->vm_start
, vma
->vm_end
,
4121 (vma
->vm_end
- vma
->vm_start
) / SZ_1K
, vma
->vm_flags
,
4122 (unsigned long)pgprot_val(vma
->vm_page_prot
));
4124 if (vma
->vm_flags
& FORBIDDEN_MMAP_FLAGS
) {
4126 failure_string
= "bad vm_flags";
4129 vma
->vm_flags
= (vma
->vm_flags
| VM_DONTCOPY
) & ~VM_MAYWRITE
;
4130 vma
->vm_ops
= &binder_vm_ops
;
4131 vma
->vm_private_data
= proc
;
4133 ret
= binder_alloc_mmap_handler(&proc
->alloc
, vma
);
4136 proc
->files
= get_files_struct(current
);
4140 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
4141 proc
->pid
, vma
->vm_start
, vma
->vm_end
, failure_string
, ret
);
4145 static int binder_open(struct inode
*nodp
, struct file
*filp
)
4147 struct binder_proc
*proc
;
4148 struct binder_device
*binder_dev
;
4150 binder_debug(BINDER_DEBUG_OPEN_CLOSE
, "binder_open: %d:%d\n",
4151 current
->group_leader
->pid
, current
->pid
);
4153 proc
= kzalloc(sizeof(*proc
), GFP_KERNEL
);
4156 spin_lock_init(&proc
->inner_lock
);
4157 spin_lock_init(&proc
->outer_lock
);
4158 get_task_struct(current
->group_leader
);
4159 proc
->tsk
= current
->group_leader
;
4160 INIT_LIST_HEAD(&proc
->todo
);
4161 init_waitqueue_head(&proc
->wait
);
4162 proc
->default_priority
= task_nice(current
);
4163 binder_dev
= container_of(filp
->private_data
, struct binder_device
,
4165 proc
->context
= &binder_dev
->context
;
4166 binder_alloc_init(&proc
->alloc
);
4168 binder_lock(__func__
);
4170 binder_stats_created(BINDER_STAT_PROC
);
4171 proc
->pid
= current
->group_leader
->pid
;
4172 INIT_LIST_HEAD(&proc
->delivered_death
);
4173 filp
->private_data
= proc
;
4175 binder_unlock(__func__
);
4177 mutex_lock(&binder_procs_lock
);
4178 hlist_add_head(&proc
->proc_node
, &binder_procs
);
4179 mutex_unlock(&binder_procs_lock
);
4181 if (binder_debugfs_dir_entry_proc
) {
4184 snprintf(strbuf
, sizeof(strbuf
), "%u", proc
->pid
);
4186 * proc debug entries are shared between contexts, so
4187 * this will fail if the process tries to open the driver
4188 * again with a different context. The priting code will
4189 * anyway print all contexts that a given PID has, so this
4192 proc
->debugfs_entry
= debugfs_create_file(strbuf
, S_IRUGO
,
4193 binder_debugfs_dir_entry_proc
,
4194 (void *)(unsigned long)proc
->pid
,
4201 static int binder_flush(struct file
*filp
, fl_owner_t id
)
4203 struct binder_proc
*proc
= filp
->private_data
;
4205 binder_defer_work(proc
, BINDER_DEFERRED_FLUSH
);
4210 static void binder_deferred_flush(struct binder_proc
*proc
)
4215 for (n
= rb_first(&proc
->threads
); n
!= NULL
; n
= rb_next(n
)) {
4216 struct binder_thread
*thread
= rb_entry(n
, struct binder_thread
, rb_node
);
4218 thread
->looper_need_return
= true;
4219 if (thread
->looper
& BINDER_LOOPER_STATE_WAITING
) {
4220 wake_up_interruptible(&thread
->wait
);
4224 wake_up_interruptible_all(&proc
->wait
);
4226 binder_debug(BINDER_DEBUG_OPEN_CLOSE
,
4227 "binder_flush: %d woke %d threads\n", proc
->pid
,
4231 static int binder_release(struct inode
*nodp
, struct file
*filp
)
4233 struct binder_proc
*proc
= filp
->private_data
;
4235 debugfs_remove(proc
->debugfs_entry
);
4236 binder_defer_work(proc
, BINDER_DEFERRED_RELEASE
);
4241 static int binder_node_release(struct binder_node
*node
, int refs
)
4243 struct binder_ref
*ref
;
4245 struct binder_proc
*proc
= node
->proc
;
4247 binder_release_work(proc
, &node
->async_todo
);
4249 binder_inner_proc_lock(proc
);
4250 binder_dequeue_work_ilocked(&node
->work
);
4252 * The caller must have taken a temporary ref on the node,
4254 BUG_ON(!node
->tmp_refs
);
4255 if (hlist_empty(&node
->refs
) && node
->tmp_refs
== 1) {
4256 binder_inner_proc_unlock(proc
);
4257 binder_free_node(node
);
4263 node
->local_strong_refs
= 0;
4264 node
->local_weak_refs
= 0;
4265 binder_inner_proc_unlock(proc
);
4267 spin_lock(&binder_dead_nodes_lock
);
4268 hlist_add_head(&node
->dead_node
, &binder_dead_nodes
);
4269 spin_unlock(&binder_dead_nodes_lock
);
4271 hlist_for_each_entry(ref
, &node
->refs
, node_entry
) {
4279 binder_inner_proc_lock(ref
->proc
);
4280 if (list_empty(&ref
->death
->work
.entry
)) {
4281 ref
->death
->work
.type
= BINDER_WORK_DEAD_BINDER
;
4282 binder_enqueue_work_ilocked(&ref
->death
->work
,
4284 wake_up_interruptible(&ref
->proc
->wait
);
4287 binder_inner_proc_unlock(ref
->proc
);
4290 binder_debug(BINDER_DEBUG_DEAD_BINDER
,
4291 "node %d now dead, refs %d, death %d\n",
4292 node
->debug_id
, refs
, death
);
4293 binder_put_node(node
);
4298 static void binder_deferred_release(struct binder_proc
*proc
)
4300 struct binder_context
*context
= proc
->context
;
4302 int threads
, nodes
, incoming_refs
, outgoing_refs
, active_transactions
;
4304 BUG_ON(proc
->files
);
4306 mutex_lock(&binder_procs_lock
);
4307 hlist_del(&proc
->proc_node
);
4308 mutex_unlock(&binder_procs_lock
);
4310 mutex_lock(&context
->context_mgr_node_lock
);
4311 if (context
->binder_context_mgr_node
&&
4312 context
->binder_context_mgr_node
->proc
== proc
) {
4313 binder_debug(BINDER_DEBUG_DEAD_BINDER
,
4314 "%s: %d context_mgr_node gone\n",
4315 __func__
, proc
->pid
);
4316 context
->binder_context_mgr_node
= NULL
;
4318 mutex_unlock(&context
->context_mgr_node_lock
);
4320 * Make sure proc stays alive after we
4321 * remove all the threads
4325 proc
->is_dead
= true;
4327 active_transactions
= 0;
4328 while ((n
= rb_first(&proc
->threads
))) {
4329 struct binder_thread
*thread
;
4331 thread
= rb_entry(n
, struct binder_thread
, rb_node
);
4333 active_transactions
+= binder_thread_release(proc
, thread
);
4338 while ((n
= rb_first(&proc
->nodes
))) {
4339 struct binder_node
*node
;
4341 node
= rb_entry(n
, struct binder_node
, rb_node
);
4344 * take a temporary ref on the node before
4345 * calling binder_node_release() which will either
4346 * kfree() the node or call binder_put_node()
4348 binder_inc_node_tmpref(node
);
4349 rb_erase(&node
->rb_node
, &proc
->nodes
);
4350 incoming_refs
= binder_node_release(node
, incoming_refs
);
4354 while ((n
= rb_first(&proc
->refs_by_desc
))) {
4355 struct binder_ref
*ref
;
4357 ref
= rb_entry(n
, struct binder_ref
, rb_node_desc
);
4359 binder_cleanup_ref(ref
);
4360 binder_free_ref(ref
);
4363 binder_release_work(proc
, &proc
->todo
);
4364 binder_release_work(proc
, &proc
->delivered_death
);
4366 binder_debug(BINDER_DEBUG_OPEN_CLOSE
,
4367 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
4368 __func__
, proc
->pid
, threads
, nodes
, incoming_refs
,
4369 outgoing_refs
, active_transactions
);
4371 binder_proc_dec_tmpref(proc
);
4374 static void binder_deferred_func(struct work_struct
*work
)
4376 struct binder_proc
*proc
;
4377 struct files_struct
*files
;
4382 binder_lock(__func__
);
4383 mutex_lock(&binder_deferred_lock
);
4384 if (!hlist_empty(&binder_deferred_list
)) {
4385 proc
= hlist_entry(binder_deferred_list
.first
,
4386 struct binder_proc
, deferred_work_node
);
4387 hlist_del_init(&proc
->deferred_work_node
);
4388 defer
= proc
->deferred_work
;
4389 proc
->deferred_work
= 0;
4394 mutex_unlock(&binder_deferred_lock
);
4397 if (defer
& BINDER_DEFERRED_PUT_FILES
) {
4398 files
= proc
->files
;
4403 if (defer
& BINDER_DEFERRED_FLUSH
)
4404 binder_deferred_flush(proc
);
4406 if (defer
& BINDER_DEFERRED_RELEASE
)
4407 binder_deferred_release(proc
); /* frees proc */
4409 binder_unlock(__func__
);
4411 put_files_struct(files
);
/* Single shared work item: binder_deferred_func() drains binder_deferred_list. */
4414 static DECLARE_WORK(binder_deferred_work
, binder_deferred_func
);
/*
 * Request deferred work of kind @defer for @proc.  Flags accumulate in
 * proc->deferred_work under binder_deferred_lock; the proc is added to
 * binder_deferred_list (and the shared work item queued) only when it is
 * not already pending there, so repeated requests coalesce into a single
 * binder_deferred_func() pass.
 * NOTE(review): lossy extraction -- the return-type line and closing braces
 * are not visible here.
 */
4417 binder_defer_work(struct binder_proc
*proc
, enum binder_deferred_state defer
)
4419 mutex_lock(&binder_deferred_lock
);
4420 proc
->deferred_work
|= defer
;
/* hlist_unhashed() is the "not already on the deferred list" test. */
4421 if (hlist_unhashed(&proc
->deferred_work_node
)) {
4422 hlist_add_head(&proc
->deferred_work_node
,
4423 &binder_deferred_list
);
4424 queue_work(binder_deferred_workqueue
, &binder_deferred_work
);
4426 mutex_unlock(&binder_deferred_lock
);
4429 static void print_binder_transaction(struct seq_file
*m
, const char *prefix
,
4430 struct binder_transaction
*t
)
4432 spin_lock(&t
->lock
);
4434 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
4435 prefix
, t
->debug_id
, t
,
4436 t
->from
? t
->from
->proc
->pid
: 0,
4437 t
->from
? t
->from
->pid
: 0,
4438 t
->to_proc
? t
->to_proc
->pid
: 0,
4439 t
->to_thread
? t
->to_thread
->pid
: 0,
4440 t
->code
, t
->flags
, t
->priority
, t
->need_reply
);
4441 spin_unlock(&t
->lock
);
4443 if (t
->buffer
== NULL
) {
4444 seq_puts(m
, " buffer free\n");
4447 if (t
->buffer
->target_node
)
4448 seq_printf(m
, " node %d",
4449 t
->buffer
->target_node
->debug_id
);
4450 seq_printf(m
, " size %zd:%zd data %p\n",
4451 t
->buffer
->data_size
, t
->buffer
->offsets_size
,
4455 static void print_binder_work_ilocked(struct seq_file
*m
, const char *prefix
,
4456 const char *transaction_prefix
,
4457 struct binder_work
*w
)
4459 struct binder_node
*node
;
4460 struct binder_transaction
*t
;
4463 case BINDER_WORK_TRANSACTION
:
4464 t
= container_of(w
, struct binder_transaction
, work
);
4465 print_binder_transaction(m
, transaction_prefix
, t
);
4467 case BINDER_WORK_RETURN_ERROR
: {
4468 struct binder_error
*e
= container_of(
4469 w
, struct binder_error
, work
);
4471 seq_printf(m
, "%stransaction error: %u\n",
4474 case BINDER_WORK_TRANSACTION_COMPLETE
:
4475 seq_printf(m
, "%stransaction complete\n", prefix
);
4477 case BINDER_WORK_NODE
:
4478 node
= container_of(w
, struct binder_node
, work
);
4479 seq_printf(m
, "%snode work %d: u%016llx c%016llx\n",
4480 prefix
, node
->debug_id
,
4481 (u64
)node
->ptr
, (u64
)node
->cookie
);
4483 case BINDER_WORK_DEAD_BINDER
:
4484 seq_printf(m
, "%shas dead binder\n", prefix
);
4486 case BINDER_WORK_DEAD_BINDER_AND_CLEAR
:
4487 seq_printf(m
, "%shas cleared dead binder\n", prefix
);
4489 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION
:
4490 seq_printf(m
, "%shas cleared death notification\n", prefix
);
4493 seq_printf(m
, "%sunknown work: type %d\n", prefix
, w
->type
);
4498 static void print_binder_thread_ilocked(struct seq_file
*m
,
4499 struct binder_thread
*thread
,
4502 struct binder_transaction
*t
;
4503 struct binder_work
*w
;
4504 size_t start_pos
= m
->count
;
4507 WARN_ON(!spin_is_locked(&thread
->proc
->inner_lock
));
4508 seq_printf(m
, " thread %d: l %02x need_return %d tr %d\n",
4509 thread
->pid
, thread
->looper
,
4510 thread
->looper_need_return
,
4511 atomic_read(&thread
->tmp_ref
));
4512 header_pos
= m
->count
;
4513 t
= thread
->transaction_stack
;
4515 if (t
->from
== thread
) {
4516 print_binder_transaction(m
,
4517 " outgoing transaction", t
);
4519 } else if (t
->to_thread
== thread
) {
4520 print_binder_transaction(m
,
4521 " incoming transaction", t
);
4524 print_binder_transaction(m
, " bad transaction", t
);
4528 list_for_each_entry(w
, &thread
->todo
, entry
) {
4529 print_binder_work_ilocked(m
, " ",
4530 " pending transaction", w
);
4532 if (!print_always
&& m
->count
== header_pos
)
4533 m
->count
= start_pos
;
4536 static void print_binder_node(struct seq_file
*m
, struct binder_node
*node
)
4538 struct binder_ref
*ref
;
4539 struct binder_work
*w
;
4543 hlist_for_each_entry(ref
, &node
->refs
, node_entry
)
4546 seq_printf(m
, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
4547 node
->debug_id
, (u64
)node
->ptr
, (u64
)node
->cookie
,
4548 node
->has_strong_ref
, node
->has_weak_ref
,
4549 node
->local_strong_refs
, node
->local_weak_refs
,
4550 node
->internal_strong_refs
, count
, node
->tmp_refs
);
4552 seq_puts(m
, " proc");
4553 hlist_for_each_entry(ref
, &node
->refs
, node_entry
)
4554 seq_printf(m
, " %d", ref
->proc
->pid
);
4558 binder_inner_proc_lock(node
->proc
);
4559 list_for_each_entry(w
, &node
->async_todo
, entry
)
4560 print_binder_work_ilocked(m
, " ",
4561 " pending async transaction", w
);
4562 binder_inner_proc_unlock(node
->proc
);
/*
 * Emit one binder_ref line to the seq_file: ref debug id and descriptor,
 * a "dead " marker when the target node's proc is gone, the node's debug
 * id, the strong/weak counts, and the death-notification pointer.
 */
4566 static void print_binder_ref(struct seq_file
*m
, struct binder_ref
*ref
)
4568 seq_printf(m
, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
4569 ref
->data
.debug_id
, ref
->data
.desc
,
4570 ref
->node
->proc
? "" : "dead ",
4571 ref
->node
->debug_id
, ref
->data
.strong
,
4572 ref
->data
.weak
, ref
->death
);
4575 static void print_binder_proc(struct seq_file
*m
,
4576 struct binder_proc
*proc
, int print_all
)
4578 struct binder_work
*w
;
4580 size_t start_pos
= m
->count
;
4583 seq_printf(m
, "proc %d\n", proc
->pid
);
4584 seq_printf(m
, "context %s\n", proc
->context
->name
);
4585 header_pos
= m
->count
;
4587 binder_inner_proc_lock(proc
);
4588 for (n
= rb_first(&proc
->threads
); n
!= NULL
; n
= rb_next(n
))
4589 print_binder_thread_ilocked(m
, rb_entry(n
, struct binder_thread
,
4590 rb_node
), print_all
);
4591 binder_inner_proc_unlock(proc
);
4592 for (n
= rb_first(&proc
->nodes
); n
!= NULL
; n
= rb_next(n
)) {
4593 struct binder_node
*node
= rb_entry(n
, struct binder_node
,
4595 if (print_all
|| node
->has_async_transaction
)
4596 print_binder_node(m
, node
);
4599 for (n
= rb_first(&proc
->refs_by_desc
);
4602 print_binder_ref(m
, rb_entry(n
, struct binder_ref
,
4605 binder_alloc_print_allocated(m
, &proc
->alloc
);
4606 binder_inner_proc_lock(proc
);
4607 list_for_each_entry(w
, &proc
->todo
, entry
)
4608 print_binder_work_ilocked(m
, " ", " pending transaction", w
);
4609 list_for_each_entry(w
, &proc
->delivered_death
, entry
) {
4610 seq_puts(m
, " has delivered dead binder\n");
4613 binder_inner_proc_unlock(proc
);
4614 if (!print_all
&& m
->count
== header_pos
)
4615 m
->count
= start_pos
;
4618 static const char * const binder_return_strings
[] = {
4623 "BR_ACQUIRE_RESULT",
4625 "BR_TRANSACTION_COMPLETE",
4630 "BR_ATTEMPT_ACQUIRE",
4635 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4639 static const char * const binder_command_strings
[] = {
4642 "BC_ACQUIRE_RESULT",
4650 "BC_ATTEMPT_ACQUIRE",
4651 "BC_REGISTER_LOOPER",
4654 "BC_REQUEST_DEATH_NOTIFICATION",
4655 "BC_CLEAR_DEATH_NOTIFICATION",
4656 "BC_DEAD_BINDER_DONE",
4657 "BC_TRANSACTION_SG",
4661 static const char * const binder_objstat_strings
[] = {
4668 "transaction_complete"
4671 static void print_binder_stats(struct seq_file
*m
, const char *prefix
,
4672 struct binder_stats
*stats
)
4676 BUILD_BUG_ON(ARRAY_SIZE(stats
->bc
) !=
4677 ARRAY_SIZE(binder_command_strings
));
4678 for (i
= 0; i
< ARRAY_SIZE(stats
->bc
); i
++) {
4679 int temp
= atomic_read(&stats
->bc
[i
]);
4682 seq_printf(m
, "%s%s: %d\n", prefix
,
4683 binder_command_strings
[i
], temp
);
4686 BUILD_BUG_ON(ARRAY_SIZE(stats
->br
) !=
4687 ARRAY_SIZE(binder_return_strings
));
4688 for (i
= 0; i
< ARRAY_SIZE(stats
->br
); i
++) {
4689 int temp
= atomic_read(&stats
->br
[i
]);
4692 seq_printf(m
, "%s%s: %d\n", prefix
,
4693 binder_return_strings
[i
], temp
);
4696 BUILD_BUG_ON(ARRAY_SIZE(stats
->obj_created
) !=
4697 ARRAY_SIZE(binder_objstat_strings
));
4698 BUILD_BUG_ON(ARRAY_SIZE(stats
->obj_created
) !=
4699 ARRAY_SIZE(stats
->obj_deleted
));
4700 for (i
= 0; i
< ARRAY_SIZE(stats
->obj_created
); i
++) {
4701 int created
= atomic_read(&stats
->obj_created
[i
]);
4702 int deleted
= atomic_read(&stats
->obj_deleted
[i
]);
4704 if (created
|| deleted
)
4705 seq_printf(m
, "%s%s: active %d total %d\n",
4707 binder_objstat_strings
[i
],
/**
 * print_binder_proc_stats() - dump per-process binder statistics to debugfs
 * @m:    seq_file receiving the formatted output
 * @proc: binder process whose counters are printed
 *
 * Emits thread/node/ref counts, allocator usage and the per-proc
 * command/return statistics.  The rb-tree walks below are done without
 * proc->inner_lock; only the todo-list scan takes it, so the counts are
 * best-effort snapshots intended for debugging, not exact accounting.
 */
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct rb_node *n;
	int count, strong, weak;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);

	/* Count registered threads from the threads rb-tree. */
	count = 0;
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			proc->ready_threads,
			binder_alloc_get_free_async_space(&proc->alloc));

	/* Count nodes owned by this process. */
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, "  nodes: %d\n", count);

	/* Sum ref counts over all references held by this process. */
	count = 0;
	strong = 0;
	weak = 0;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	/*
	 * The todo list is protected by proc->inner_lock, so take it while
	 * counting pending transaction work items.
	 */
	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}
/**
 * binder_state_show() - debugfs "state" file: full binder state dump
 * @m:      seq_file receiving the output
 * @unused: unused seq_file private argument
 *
 * Prints all dead nodes (under binder_dead_nodes_lock) followed by the
 * full state of every registered process (under binder_procs_lock).
 * The global binder lock is held across the whole dump so the picture
 * is internally consistent.
 *
 * Return: always 0.
 */
static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;

	binder_lock(__func__);

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
		print_binder_node(m, node);
	spin_unlock(&binder_dead_nodes_lock);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	binder_unlock(__func__);
	return 0;
}
/**
 * binder_stats_show() - debugfs "stats" file: global and per-proc stats
 * @m:      seq_file receiving the output
 * @unused: unused seq_file private argument
 *
 * Prints the driver-wide binder_stats counters, then the statistics of
 * each registered process (binder_procs walked under binder_procs_lock).
 *
 * Return: always 0.
 */
static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	binder_lock(__func__);

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	binder_unlock(__func__);
	return 0;
}
/**
 * binder_transactions_show() - debugfs "transactions" file
 * @m:      seq_file receiving the output
 * @unused: unused seq_file private argument
 *
 * Dumps only the in-flight transactions of every process: the third
 * argument to print_binder_proc() is 0 (non-verbose), unlike the full
 * dump done by binder_state_show().
 *
 * Return: always 0.
 */
static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	binder_lock(__func__);

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	binder_unlock(__func__);
	return 0;
}
/**
 * binder_proc_show() - debugfs per-process state file
 * @m:      seq_file; m->private carries the pid encoded as a pointer
 * @unused: unused seq_file private argument
 *
 * Looks up every binder_proc matching the pid stored in m->private and
 * prints its full state.  The loop deliberately does not break on the
 * first match: several binder_proc instances can share a pid (one per
 * opened binder device / fd).
 *
 * Return: always 0 (an unknown pid simply produces no output).
 */
static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	binder_lock(__func__);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	binder_unlock(__func__);
	return 0;
}
/**
 * print_binder_transaction_log_entry() - print one transaction-log record
 * @m: seq_file receiving the output
 * @e: log entry to print (may be concurrently overwritten by writers)
 *
 * The log is lock-free: writers bump e->debug_id_done when they finish
 * filling an entry.  We snapshot debug_id_done before reading the fields
 * and re-check it afterwards; if it changed (or is still 0) the entry was
 * being rewritten while we printed it and is flagged "(incomplete)".
 * The smp_rmb() pairs with the writer's smp_wmb() around the update.
 */
static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}
/**
 * binder_transaction_log_show() - debugfs transaction-log file
 * @m:      seq_file; m->private points at the binder_transaction_log
 *          (either the main log or the failed-transaction log)
 * @unused: unused seq_file private argument
 *
 * The log is a fixed-size ring buffer indexed by the atomic log->cur
 * (initialized to ~0U so the first post-increment lands on slot 0).
 * Before wraparound (count fits and !log->full) entries are printed
 * from slot 0; after wraparound printing starts at the oldest slot,
 * (log_cur + 1) % size, and covers the whole ring.
 *
 * Return: always 0.
 */
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	/* Start at 0 until the ring wraps; afterwards start at the oldest. */
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}
/* File operations for /dev/binder* character devices. */
static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	/* Same handler for compat: the binder ABI is 32/64-bit clean. */
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};
/*
 * Generate the single-open seq_file fops (binder_*_fops) used by the
 * debugfs files registered in binder_init() below.
 */
BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);
/**
 * init_binder_device() - allocate and register one binder misc device
 * @name: device name; must stay valid for the device's lifetime, since
 *        both miscdev.name and context.name alias it (no copy is made)
 *
 * Registers a dynamically-minored misc device backed by binder_fops and
 * links it into the global binder_devices list on success.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, or the negative
 * errno from misc_register() (the allocation is freed on that path).
 */
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	/* No context manager yet; one is set later via BINDER_SET_CONTEXT_MGR. */
	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}
4932 static int __init
binder_init(void)
4935 char *device_name
, *device_names
;
4936 struct binder_device
*device
;
4937 struct hlist_node
*tmp
;
4939 atomic_set(&binder_transaction_log
.cur
, ~0U);
4940 atomic_set(&binder_transaction_log_failed
.cur
, ~0U);
4941 binder_deferred_workqueue
= create_singlethread_workqueue("binder");
4942 if (!binder_deferred_workqueue
)
4945 binder_debugfs_dir_entry_root
= debugfs_create_dir("binder", NULL
);
4946 if (binder_debugfs_dir_entry_root
)
4947 binder_debugfs_dir_entry_proc
= debugfs_create_dir("proc",
4948 binder_debugfs_dir_entry_root
);
4950 if (binder_debugfs_dir_entry_root
) {
4951 debugfs_create_file("state",
4953 binder_debugfs_dir_entry_root
,
4955 &binder_state_fops
);
4956 debugfs_create_file("stats",
4958 binder_debugfs_dir_entry_root
,
4960 &binder_stats_fops
);
4961 debugfs_create_file("transactions",
4963 binder_debugfs_dir_entry_root
,
4965 &binder_transactions_fops
);
4966 debugfs_create_file("transaction_log",
4968 binder_debugfs_dir_entry_root
,
4969 &binder_transaction_log
,
4970 &binder_transaction_log_fops
);
4971 debugfs_create_file("failed_transaction_log",
4973 binder_debugfs_dir_entry_root
,
4974 &binder_transaction_log_failed
,
4975 &binder_transaction_log_fops
);
4979 * Copy the module_parameter string, because we don't want to
4980 * tokenize it in-place.
4982 device_names
= kzalloc(strlen(binder_devices_param
) + 1, GFP_KERNEL
);
4983 if (!device_names
) {
4985 goto err_alloc_device_names_failed
;
4987 strcpy(device_names
, binder_devices_param
);
4989 while ((device_name
= strsep(&device_names
, ","))) {
4990 ret
= init_binder_device(device_name
);
4992 goto err_init_binder_device_failed
;
4997 err_init_binder_device_failed
:
4998 hlist_for_each_entry_safe(device
, tmp
, &binder_devices
, hlist
) {
4999 misc_deregister(&device
->miscdev
);
5000 hlist_del(&device
->hlist
);
5003 err_alloc_device_names_failed
:
5004 debugfs_remove_recursive(binder_debugfs_dir_entry_root
);
5006 destroy_workqueue(binder_deferred_workqueue
);
5011 device_initcall(binder_init
);
5013 #define CREATE_TRACE_POINTS
5014 #include "binder_trace.h"
5016 MODULE_LICENSE("GPL v2");