/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 */
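/*
 * Illustration only (not part of the driver): given the ordering rules
 * above, a hypothetical caller that needed all three locks for one
 * proc/node pair would acquire and release them like this:
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_node_lock(node);		// 2) node->lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *	...touch refs, node fields, todo lists...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */
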
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"
#ifdef CONFIG_SAMSUNG_FREECESS
#include <linux/freecess.h>
#endif

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
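
/*
 * For reference, BINDER_DEBUG_ENTRY(proc) above expands to roughly the
 * following (names generated by the ## token paste):
 *
 *	static int binder_proc_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, binder_proc_show, inode->i_private);
 *	}
 *
 *	static const struct file_operations binder_proc_fops = {
 *		.owner = THIS_MODULE,
 *		.open = binder_proc_open,
 *		.read = seq_read,
 *		.llseek = seq_lseek,
 *		.release = single_release,
 *	};
 */
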
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);
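
/*
 * Since debug_mask is exposed via module_param_named() with mode 0644,
 * it can typically be adjusted at runtime through sysfs, e.g. (assuming
 * the module is named "binder"):
 *
 *	echo 0x3f > /sys/module/binder/parameters/debug_mask
 *
 * which would enable the first six debug classes above.
 */
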
static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);

	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)
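
/*
 * These container_of() wrappers recover the enclosing object from a
 * pointer to its embedded header. Sketch (hypothetical locals):
 *
 *	struct flat_binder_object *fbo = ...;
 *	struct binder_object_header *hdr = &fbo->hdr;
 *
 *	// to_flat_binder_object(hdr) == fbo
 */
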
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
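
/*
 * Typical use of the log, sketched from how the entry is initialized
 * above (the debug_id variable is illustrative):
 *
 *	struct binder_transaction_log_entry *e;
 *
 *	e = binder_transaction_log_add(&binder_transaction_log);
 *	e->return_error = ...;			// fill in entry fields
 *	smp_wmb();				// pairs with the barrier above
 *	WRITE_ONCE(e->debug_id_done, debug_id);	// mark the entry complete
 *
 * so a reader that sees a non-zero debug_id_done knows the rest of the
 * entry is fully written.
 */
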
struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;

	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};

/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @sched_policy:         minimum scheduling policy for node
 *                        (invariant after initialized)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @txn_security_ctx:     require sender's security context
 *                        (invariant after initialized)
 * @inherit_rt:           inherit RT scheduling policy from caller
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 sched_policy:2;
		u8 inherit_rt:1;
		u8 accept_fds:1;
		u8 txn_security_ctx:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:        binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:  list entry for node->refs list in target node
 *               (protected by @node->lock)
 * @proc:        binder_proc containing ref
 * @node:        binder_node of target node. When cleaning up a
 *               ref for deletion in binder_cleanup_ref, a non-NULL
 *               @node indicates the node must be freed
 * @death:       pointer to death notification (ref_death) if requested
 *               (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};

/**
 * struct binder_priority - scheduler policy and priority
 * @sched_policy:   scheduler policy
 * @prio:           [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
 *
 * The binder driver supports inheriting the following scheduler policies:
 * SCHED_NORMAL
 * SCHED_BATCH
 * SCHED_FIFO
 * SCHED_RR
 */
struct binder_priority {
	unsigned int sched_policy;
	int prio;
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (protected by @files_lock)
 * @files_lock:           mutex to protect @files
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct mutex files_lock;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	struct binder_priority default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 * @task:                 struct task_struct for this thread
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
	struct task_struct *task;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	struct binder_priority priority;
	struct binder_priority saved_priority;
	bool set_priority_called;
	kuid_t sender_euid;
	binder_uintptr_t security_ctx;
	/**
	 * @lock:  protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release spinlock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:	list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
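
/*
 * The difference between the two enqueue variants, as a sketch (t is a
 * hypothetical binder_transaction already set up by the caller; the
 * proc->inner_lock rules above still apply):
 *
 *	// wakes the thread's read loop (sets process_todo)
 *	binder_enqueue_thread_work(thread, &t->work);
 *
 *	// queued, but the thread may still go to sleep in read
 *	binder_enqueue_deferred_thread_work_ilocked(thread, &t->work);
 */
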
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	unsigned long rlim_cur;
	unsigned long irqs;
	int ret;

	mutex_lock(&proc->files_lock);
	if (proc->files == NULL) {
		ret = -ESRCH;
		goto err;
	}
	if (!lock_task_sighand(proc->tsk, &irqs)) {
		ret = -EMFILE;
		goto err;
	}
	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
err:
	mutex_unlock(&proc->files_lock);
	return ret;
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	mutex_lock(&proc->files_lock);
	if (proc->files)
		__fd_install(proc->files, fd, file);
	mutex_unlock(&proc->files_lock);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	mutex_lock(&proc->files_lock);
	if (proc->files == NULL) {
		retval = -ESRCH;
		goto err;
	}
	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;
err:
	mutex_unlock(&proc->files_lock);
	return retval;
}

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
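
/*
 * Putting the two halves together, the pattern described in the comment
 * above looks like this in a caller (sketch; error handling omitted):
 *
 *	binder_inner_proc_lock(proc);
 *	thread = binder_select_thread_ilocked(proc);
 *	...queue work to proc->todo or thread->todo...
 *	binder_wakeup_thread_ilocked(proc, thread, true);
 *	binder_inner_proc_unlock(proc);
 *
 * binder_wakeup_proc_ilocked() is shorthand for exactly this
 * select-then-wake sequence when no work-placement decision is needed.
 */
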
static bool is_rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static bool is_fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static bool binder_supported_policy(int policy)
{
	return is_fair_policy(policy) || is_rt_policy(policy);
}

static int to_userspace_prio(int policy, int kernel_priority)
{
	if (is_fair_policy(policy))
		return PRIO_TO_NICE(kernel_priority);
	else
		return MAX_USER_RT_PRIO - 1 - kernel_priority;
}

static int to_kernel_prio(int policy, int user_priority)
{
	if (is_fair_policy(policy))
		return NICE_TO_PRIO(user_priority);
	else
		return MAX_USER_RT_PRIO - 1 - user_priority;
}
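
/*
 * Worked examples of the two mappings (assuming the usual kernel
 * constants, MAX_USER_RT_PRIO == 100 and NICE_TO_PRIO(n) == 120 + n):
 *
 *	to_kernel_prio(SCHED_NORMAL, 0)   == 120  // nice 0
 *	to_kernel_prio(SCHED_NORMAL, -20) == 100  // highest nice level
 *	to_kernel_prio(SCHED_FIFO, 99)    == 0    // highest RT priority
 *	to_userspace_prio(SCHED_FIFO, 0)  == 99   // and back again
 */
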
static void binder_do_set_priority(struct task_struct *task,
				   struct binder_priority desired,
				   bool verify)
{
	int priority; /* user-space prio value */
	bool has_cap_nice;
	unsigned int policy = desired.sched_policy;

	if (task->policy == policy && task->normal_prio == desired.prio)
		return;

	has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);

	priority = to_userspace_prio(policy, desired.prio);

	if (verify && is_rt_policy(policy) && !has_cap_nice) {
		long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);

		if (max_rtprio == 0) {
			policy = SCHED_NORMAL;
			priority = MIN_NICE;
		} else if (priority > max_rtprio) {
			priority = max_rtprio;
		}
	}

	if (verify && is_fair_policy(policy) && !has_cap_nice) {
		long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));

		if (min_nice > MAX_NICE) {
			binder_user_error("%d RLIMIT_NICE not set\n",
					  task->pid);
			return;
		} else if (priority < min_nice) {
			priority = min_nice;
		}
	}

	if (policy != desired.sched_policy ||
	    to_kernel_prio(policy, priority) != desired.prio)
		binder_debug(BINDER_DEBUG_PRIORITY_CAP,
			     "%d: priority %d not allowed, using %d instead\n",
			     task->pid, desired.prio,
			     to_kernel_prio(policy, priority));

	trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
				  to_kernel_prio(policy, priority),
				  desired.prio);

	/* Set the actual priority */
	if (task->policy != policy || is_rt_policy(policy)) {
		struct sched_param params;

		params.sched_priority = is_rt_policy(policy) ? priority : 0;

		sched_setscheduler_nocheck(task,
					   policy | SCHED_RESET_ON_FORK,
					   &params);
	}
	if (is_fair_policy(policy))
		set_user_nice(task, priority);
}

static void binder_set_priority(struct task_struct *task,
				struct binder_priority desired)
{
	binder_do_set_priority(task, desired, /* verify = */ true);
}

static void binder_restore_priority(struct task_struct *task,
				    struct binder_priority desired)
{
	binder_do_set_priority(task, desired, /* verify = */ false);
}

static void binder_transaction_priority(struct task_struct *task,
					struct binder_transaction *t,
					struct binder_priority node_prio,
					bool inherit_rt)
{
	struct binder_priority desired_prio = t->priority;

	if (t->set_priority_called)
		return;

	t->set_priority_called = true;
	t->saved_priority.sched_policy = task->policy;
	t->saved_priority.prio = task->normal_prio;

	if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
		desired_prio.prio = NICE_TO_PRIO(0);
		desired_prio.sched_policy = SCHED_NORMAL;
	}

	if (node_prio.prio < t->priority.prio ||
	    (node_prio.prio == t->priority.prio &&
	     node_prio.sched_policy == SCHED_FIFO)) {
		/*
		 * In case the minimum priority on the node is
		 * higher (lower value), use that priority. If
		 * the priority is the same, but the node uses
		 * SCHED_FIFO, prefer SCHED_FIFO, since it can
		 * run unbounded, unlike SCHED_RR.
		 */
		desired_prio = node_prio;
	}

	binder_set_priority(task, desired_prio);
}
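
/*
 * Example of the decision above (illustrative numbers): if the caller
 * runs at SCHED_NORMAL prio 120 (t->priority) and the node was published
 * with a minimum priority of 110, node_prio wins and the target task
 * temporarily runs at 110. The caller's original 120 is stashed in
 * t->saved_priority here, so it can be restored via
 * binder_restore_priority() once the transaction is done with the task.
 */
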
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;
	s8 priority;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
		FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
	node->min_priority = to_kernel_prio(node->sched_policy, priority);
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->
				      binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			binder_dequeue_work_ilocked(&node->work);
			/*
			 * Note: this function is the only place where we queue
			 * directly to a thread->todo without using the
			 * corresponding binder_enqueue_thread_work() helper
			 * functions; in this case it's ok to not set the
			 * process_todo flag, since we know this node work will
			 * always be followed by other work that starts queue
			 * processing: in case of synchronous transactions, a
			 * BR_REPLY or BR_ERROR; in case of oneway
			 * transactions, a BR_TRANSACTION_COMPLETE.
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
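
/*
 * Typical temporary-reference pattern (sketch):
 *
 *	node = binder_get_node(proc, ptr);	// takes a tmp ref
 *	if (node) {
 *		...use node safely...
 *		binder_put_node(node);		// drops the tmp ref
 *	}
 */
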
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		      node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->data.debug_id,
			      ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}

/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_dec_thread_txn() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from)
		return NULL;
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
	return NULL;
}
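/*
 * Caller contract (sketch): on a non-NULL return the inner lock of
 * from->proc is held, so both the unlock and the tmpref release are the
 * caller's responsibility:
 *
 *	from = binder_get_txn_from_and_acq_inner(t);
 *	if (from) {
 *		// ... operate on from under from->proc->inner_lock ...
 *		binder_inner_proc_unlock(from->proc);
 *		binder_thread_dec_tmpref(from);
 *	}
 */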
static void binder_free_transaction(struct binder_transaction *t)
{
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}
/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			"undelivered transaction %d, %s\n",
			t->debug_id, reason);
		binder_free_transaction(t);
	}
}
/**
 * binder_validate_object() - checks for a valid metadata object in a buffer.
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the buffer at which to validate an object.
 *
 * Return: If there's a valid metadata object at @offset in @buffer, the
 *	   size of that object. Otherwise, it returns zero.
 */
static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
{
	/* Check if we can read a header first */
	struct binder_object_header *hdr;
	size_t object_size = 0;

	if (buffer->data_size < sizeof(*hdr) ||
	    offset > buffer->data_size - sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	/* Ok, now see if we can read a complete object. */
	hdr = (struct binder_object_header *)(buffer->data + offset);
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
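/*
 * Editorial note on the bounds checks above: both range tests are ordered
 * to avoid unsigned wraparound. "offset > data_size - sizeof(*hdr)" is
 * only reached once "data_size < sizeof(*hdr)" has been ruled out, and the
 * final "offset <= data_size - object_size" is paired with
 * "data_size >= object_size" for the same reason. A naive
 * "offset + object_size > data_size" could wrap for a hostile offset near
 * SIZE_MAX and wrongly pass.
 */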
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b:		binder_buffer containing the object
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start:	points to the start of the offset array
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 */
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
							binder_size_t index,
							binder_size_t *start,
							binder_size_t num_valid)
{
	struct binder_buffer_object *buffer_obj;
	binder_size_t *offp;

	if (index >= num_valid)
		return NULL;

	offp = start + index;
	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
		return NULL;

	return buffer_obj;
}
/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b:			transaction buffer
 * @objects_start:	start of objects buffer
 * @buffer:		binder_buffer_object in which to fix up
 * @offset:		start offset in @buffer to fix up
 * @last_obj:		last binder_buffer_object that we fixed up in
 * @last_min_offset:	minimum fixup offset in @last_obj
 *
 * Return:		%true if a fixup in buffer @buffer at offset @offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_buffer *b,
				  binder_size_t *objects_start,
				  struct binder_buffer_object *buffer,
				  binder_size_t fixup_offset,
				  struct binder_buffer_object *last_obj,
				  binder_size_t last_min_offset)
{
	if (!last_obj) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj != buffer) {
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
		last_obj = (struct binder_buffer_object *)
			(b->data + *(objects_start + last_obj->parent));
	}
	return (fixup_offset >= last_min_offset);
}
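/*
 * Typical call site (sketch): when walking the offset array, num_valid is
 * the count of offsets already verified, i.e. the current cursor position:
 *
 *	parent = binder_validate_ptr(b, fda->parent, off_start,
 *				     offp - off_start);
 *	if (parent &&
 *	    binder_validate_fixup(b, off_start, parent, fda->parent_offset,
 *				  last_fixup_obj, last_fixup_min_off)) {
 *		// fixup is in order, safe to apply
 *	}
 */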
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_start, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start = (binder_size_t *)(buffer->data +
				      ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)off_start + buffer->offsets_size;
	for (offp = off_start; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(buffer, *offp);

		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		hdr = (struct binder_object_header *)(buffer->data + *offp);
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);

			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->fd);
			if (failed_at)
				task_close_fd(proc, fp->fd);
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			uintptr_t parent_buffer;
			u32 *fd_array;
			size_t fd_index;
			binder_size_t fd_buf_size;

			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(buffer, fda->parent,
						     off_start,
						     offp - off_start);
			if (!parent) {
				pr_err("transaction release %d bad parent offset",
				       debug_id);
				continue;
			}
			/*
			 * Since the parent was already fixed up, convert it
			 * back to kernel address space to access it
			 */
			parent_buffer = parent->buffer -
				binder_alloc_get_user_buffer_offset(
						&proc->alloc);
			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
				task_close_fd(proc, fd_array[fd_index]);
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}
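/*
 * failed_at semantics (editorial sketch): a NULL failed_at releases every
 * object in the buffer (the normal BC_FREE_BUFFER path); a non-NULL
 * failed_at stops the walk at the offset where binder_transaction() gave
 * up, so only the objects that were actually translated get their
 * references dropped:
 *
 *	binder_transaction_buffer_release(proc, buffer, NULL);	// all
 *	binder_transaction_buffer_release(target_proc, t->buffer, offp); // partial
 */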
static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}
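/*
 * Net effect (editorial sketch): a flat_binder_object carrying a local
 * node (BINDER_TYPE_BINDER, fp->binder/fp->cookie set) leaves this
 * function as a handle the target can use (BINDER_TYPE_HANDLE, fp->handle
 * set, binder and cookie cleared), with a reference on the node now held
 * by target_proc.
 */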
static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}
static int binder_translate_fd(int fd,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	int target_fd;
	struct file *file;
	int ret;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
	if (target_fd < 0) {
		ret = -ENOMEM;
		goto err_get_unused_fd;
	}
	task_fd_install(target_proc, target_fd, file);
	trace_binder_transaction_fd(t, fd, target_fd);
	binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
		     fd, target_fd);

	return target_fd;

err_get_unused_fd:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}
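/*
 * fd translation lifecycle (editorial sketch): fget() takes a reference on
 * the sender's struct file, task_get_unused_fd_flags() reserves a
 * descriptor slot in the *target* process, and task_fd_install() publishes
 * the file there. On any failure after fget(), the fput() in the error
 * path drops the extra reference so the file is not leaked.
 */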
static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size, num_installed_fds;
	int target_fd;
	uintptr_t parent_buffer;
	u32 *fd_array;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * Since the parent was already fixed up, convert it
	 * back to the kernel address space to access it
	 */
	parent_buffer = parent->buffer -
		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
	fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
						in_reply_to);
		if (target_fd < 0)
			goto err_translate_fd_failed;
		fd_array[fdi] = target_fd;
	}
	return 0;

err_translate_fd_failed:
	/*
	 * Failed to allocate fd or security error, free fds
	 * installed so far.
	 */
	num_installed_fds = fdi;
	for (fdi = 0; fdi < num_installed_fds; fdi++)
		task_close_fd(target_proc, fd_array[fdi]);
	return target_fd;
}
static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t *off_start,
			       binder_size_t num_valid,
			       struct binder_buffer_object *last_fixup_obj,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	u8 *parent_buffer;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(b, off_start,
				   parent, bp->parent_offset,
				   last_fixup_obj,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	parent_buffer = (u8 *)((uintptr_t)parent->buffer -
			binder_alloc_get_user_buffer_offset(
				&target_proc->alloc));
	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;

	return 0;
}
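/*
 * What the final store does (editorial sketch): bp->buffer already holds
 * the *target-process* user address of this sg buffer (patched by the
 * BINDER_TYPE_PTR case in binder_transaction()). The parent object is
 * still being assembled in kernel space, so its user address is converted
 * back with binder_alloc_get_user_buffer_offset() before the child pointer
 * is written at parent_offset:
 *
 *	kern_parent = user_parent - user_buffer_offset;
 *	*(binder_uintptr_t *)(kern_parent + bp->parent_offset) = bp->buffer;
 */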
/**
 * binder_proc_transaction() - sends a transaction to a process and wakes it up
 * @t:		transaction to send
 * @proc:	process to send the transaction to
 * @thread:	thread in @proc to send the transaction to (may be NULL)
 *
 * This function queues a transaction to the specified process. It will try
 * to find a thread in the target process to handle the transaction and
 * wake it up. If no thread is found, the work is queued to the proc
 * waitqueue.
 *
 * If the @thread parameter is not NULL, the transaction is always queued
 * to the waitlist of that specific thread.
 *
 * Return:	true if the transaction was successfully queued
 *		false if the target process or thread is dead
 */
static bool binder_proc_transaction(struct binder_transaction *t,
				    struct binder_proc *proc,
				    struct binder_thread *thread)
{
	struct binder_node *node = t->buffer->target_node;
	struct binder_priority node_prio;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool pending_async = false;

	BUG_ON(!node);
	binder_node_lock(node);
	node_prio.prio = node->min_priority;
	node_prio.sched_policy = node->sched_policy;

	if (oneway) {
		BUG_ON(thread);
		if (node->has_async_transaction) {
			pending_async = true;
		} else {
			node->has_async_transaction = true;
		}
	}

	binder_inner_proc_lock(proc);

	if (proc->is_dead || (thread && thread->is_dead)) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		return false;
	}

	if (!thread && !pending_async)
		thread = binder_select_thread_ilocked(proc);

	if (thread) {
		binder_transaction_priority(thread->task, t, node_prio,
					    node->inherit_rt);
		binder_enqueue_thread_work_ilocked(thread, &t->work);
	} else if (!pending_async) {
		binder_enqueue_work_ilocked(&t->work, &proc->todo);
	} else {
		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
	}

	if (!pending_async)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);

	return true;
}
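/*
 * Queueing decision, summarized (editorial sketch):
 *
 *	explicit @thread          -> thread->todo (always, e.g. replies)
 *	sync or idle-node oneway  -> a waiting thread picked by
 *	                             binder_select_thread_ilocked(), else
 *	                             proc->todo
 *	oneway while node is busy -> node->async_todo (throttled; drained
 *	                             one at a time from BC_FREE_BUFFER)
 */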
/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:	struct binder_node for which to get refs
 * @procp:	returns @node->proc if valid
 * @error:	if no @procp then returns BR_DEAD_REPLY
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
 *	   Also sets @procp if valid. If @node->proc is NULL (indicating
 *	   that the target proc has died), @error is set to BR_DEAD_REPLY.
 */
static struct binder_node *binder_get_node_refs_for_txn(
		struct binder_node *node,
		struct binder_proc **procp,
		uint32_t *error)
{
	struct binder_node *target_node = NULL;

	binder_node_inner_lock(node);
	if (node->proc) {
		target_node = node;
		binder_inc_node_nilocked(node, 1, 0, NULL);
		binder_inc_node_tmpref_ilocked(node);
		node->proc->tmp_ref++;
		*procp = node->proc;
	} else
		*error = BR_DEAD_REPLY;
	binder_node_inner_unlock(node);

	return target_node;
}
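/*
 * Usage (editorial sketch): the caller owns three references on success
 * and must drop all of them when the transaction completes or fails:
 *
 *	target_node = binder_get_node_refs_for_txn(ref->node, &target_proc,
 *						   &return_error);
 *	// ...
 *	binder_dec_node(target_node, 1, 0);	// strong ref
 *	binder_dec_node_tmpref(target_node);	// node tmpref
 *	binder_proc_dec_tmpref(target_proc);	// proc tmpref
 */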
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end, *off_start;
	binder_size_t off_min;
	u8 *sg_bufp, *sg_buf_end;
	struct binder_proc *target_proc = NULL;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error = 0;
	uint32_t return_error_param = 0;
	uint32_t return_error_line = 0;
	struct binder_buffer_object *last_fixup_obj = NULL;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	int t_debug_id = atomic_inc_return(&binder_last_id);
	char *secctx = NULL;
	u32 secctx_sz = 0;

	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	e->context_name = proc->context->name;

	if (reply) {
		binder_inner_proc_lock(proc);
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_inner_proc_unlock(proc);
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_empty_call_stack;
		}
		if (in_reply_to->to_thread != thread) {
			spin_lock(&in_reply_to->lock);
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			spin_unlock(&in_reply_to->lock);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		binder_inner_proc_unlock(proc);
		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			binder_inner_proc_unlock(target_thread->proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
		target_proc->tmp_ref++;
		binder_inner_proc_unlock(target_thread->proc);
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, tr->target.handle,
						     true);
			if (ref) {
				target_node = binder_get_node_refs_for_txn(
						ref->node, &target_proc,
						&return_error);
			} else {
				binder_user_error("%d:%d got transaction to invalid handle\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
			}
			binder_proc_unlock(proc);
		} else {
			mutex_lock(&context->context_mgr_node_lock);
			target_node = context->binder_context_mgr_node;
			if (target_node)
				target_node = binder_get_node_refs_for_txn(
						target_node, &target_proc,
						&return_error);
			else
				return_error = BR_DEAD_REPLY;
			mutex_unlock(&context->context_mgr_node_lock);
			if (target_node && target_proc == proc) {
				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_invalid_target_handle;
			}
		}
		if (!target_node) {
			/*
			 * return_error is set above
			 */
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		e->to_node = target_node->debug_id;
#ifdef CONFIG_SAMSUNG_FREECESS
		if (target_proc
		    && (target_proc->tsk->cred->euid.val > 10000)
		    && (proc->pid != target_proc->pid)) {
			binder_report(proc->tsk, target_proc->tsk, tr->flags & TF_ONE_WAY);
		}
#endif
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPERM;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		binder_inner_proc_lock(proc);
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				spin_lock(&tmp->lock);
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				spin_unlock(&tmp->lock);
				binder_inner_proc_unlock(proc);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EPROTO;
				return_error_line = __LINE__;
				goto err_bad_call_stack;
			}
			while (tmp) {
				struct binder_thread *from;

				spin_lock(&tmp->lock);
				from = tmp->from;
				if (from && from->proc == target_proc) {
					atomic_inc(&from->tmp_ref);
					target_thread = from;
					spin_unlock(&tmp->lock);
					break;
				}
				spin_unlock(&tmp->lock);
				tmp = tmp->from_parent;
			}
		}
		binder_inner_proc_unlock(proc);
	}
	if (target_thread)
		e->to_thread = target_thread->pid;
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);
	spin_lock_init(&t->lock);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	if (!(t->flags & TF_ONE_WAY) &&
	    binder_supported_policy(current->policy)) {
		/* Inherit supported policies for synchronous transactions */
		t->priority.sched_policy = current->policy;
		t->priority.prio = current->normal_prio;
	} else {
		/* Otherwise, fall back to the default priority */
		t->priority = target_proc->default_priority;
	}

	if (target_node && target_node->txn_security_ctx) {
		u32 secid;

		security_task_getsecid(proc->tsk, &secid);
		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
		if (ret) {
			return_error = BR_FAILED_REPLY;
			return_error_param = ret;
			return_error_line = __LINE__;
			goto err_get_secctx_failed;
		}
		extra_buffers_size += ALIGN(secctx_sz, sizeof(u64));
	}

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
	if (IS_ERR(t->buffer)) {
		/*
		 * -ESRCH indicates VMA cleared. The target is dying.
		 */
		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	if (secctx) {
		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
				    ALIGN(tr->offsets_size, sizeof(void *)) +
				    ALIGN(extra_buffers_size, sizeof(void *)) -
				    ALIGN(secctx_sz, sizeof(u64));
		char *kptr = t->buffer->data + buf_offset;

		t->security_ctx = (uintptr_t)kptr +
		    binder_alloc_get_user_buffer_offset(&target_proc->alloc);
		memcpy(kptr, secctx, secctx_sz);
		security_release_secctx(secctx, secctx_sz);
		secctx = NULL;
	}
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	off_start = (binder_size_t *)(t->buffer->data +
				      ALIGN(tr->data_size, sizeof(void *)));
	offp = off_start;

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	off_end = (void *)off_start + tr->offsets_size;
	sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
	sg_buf_end = sg_bufp + extra_buffers_size;
	off_min = 0;
	for (; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(t->buffer, *offp);

		if (object_size == 0 || *offp < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid, (u64)*offp,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
		off_min = *offp + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			int target_fd = binder_translate_fd(fp->fd, t, thread,
							    in_reply_to);

			if (target_fd < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = target_fd;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			fp->pad_binder = 0;
			fp->fd = target_fd;
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			struct binder_buffer_object *parent =
				binder_validate_ptr(t->buffer, fda->parent,
						    off_start,
						    offp - off_start);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(t->buffer, off_start,
						   parent, fda->parent_offset,
						   last_fixup_obj,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(fda, parent, t, thread,
							in_reply_to);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = parent;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end - sg_bufp;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			if (copy_from_user(sg_bufp,
					   (const void __user *)(uintptr_t)
					   bp->buffer, bp->length)) {
				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
						  proc->pid, thread->pid);
				return_error_param = -EFAULT;
				return_error = BR_FAILED_REPLY;
				return_error_line = __LINE__;
				goto err_copy_data_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)sg_bufp +
				binder_alloc_get_user_buffer_offset(
						&target_proc->alloc);
			sg_bufp += ALIGN(bp->length, sizeof(u64));

			ret = binder_fixup_parent(t, thread, bp, off_start,
						  offp - off_start,
						  last_fixup_obj,
						  last_fixup_min_off);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = bp;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_object_type;
		}
	}
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	t->work.type = BINDER_WORK_TRANSACTION;

	if (reply) {
		binder_enqueue_thread_work(thread, tcomplete);
		binder_inner_proc_lock(target_proc);
		if (target_thread->is_dead) {
			binder_inner_proc_unlock(target_proc);
			goto err_dead_proc_or_thread;
		}
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction_ilocked(target_thread, in_reply_to);
		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
		binder_inner_proc_unlock(target_proc);
		wake_up_interruptible_sync(&target_thread->wait);
		binder_restore_priority(current, in_reply_to->saved_priority);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_inner_proc_lock(proc);
		/*
		 * Defer the TRANSACTION_COMPLETE, so we don't return to
		 * userspace immediately; this allows the target process to
		 * immediately start processing this transaction, reducing
		 * latency. We will then return the TRANSACTION_COMPLETE when
		 * the target replies (or there is an error).
		 */
		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
		binder_inner_proc_unlock(proc);
		if (!binder_proc_transaction(t, target_proc, target_thread)) {
			binder_inner_proc_lock(proc);
			binder_pop_transaction_ilocked(thread, t);
			binder_inner_proc_unlock(proc);
			goto err_dead_proc_or_thread;
		}
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		binder_enqueue_thread_work(thread, tcomplete);
		if (!binder_proc_transaction(t, target_proc, NULL))
			goto err_dead_proc_or_thread;
	}
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	binder_proc_dec_tmpref(target_proc);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
	return;

err_dead_proc_or_thread:
	return_error = BR_DEAD_REPLY;
	return_error_line = __LINE__;
	binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, t->buffer, offp);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	target_node = NULL;
	t->buffer->transaction = NULL;
	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
	if (secctx)
		security_release_secctx(secctx, secctx_sz);
err_get_secctx_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	if (target_proc)
		binder_proc_dec_tmpref(target_proc);
	if (target_node) {
		binder_dec_node(target_node, 1, 0);
		binder_dec_node_tmpref(target_node);
	}

	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
		     proc->pid, thread->pid, return_error, return_error_param,
		     (u64)tr->data_size, (u64)tr->offsets_size,
		     return_error_line);

	{
		struct binder_transaction_log_entry *fe;

		e->return_error = return_error;
		e->return_error_param = return_error_param;
		e->return_error_line = return_error_line;
		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
		smp_wmb();
		WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	}

	BUG_ON(thread->return_error.cmd != BR_OK);
	if (in_reply_to) {
		binder_restore_priority(current, in_reply_to->saved_priority);
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
		thread->return_error.cmd = return_error;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
	}
}
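/*
 * Userspace view (illustrative sketch only; field names follow
 * uapi/linux/android/binder.h, and handle/FUNC_CODE/buf/offs are
 * placeholders): a client drives binder_transaction() by writing a
 * BC_TRANSACTION command through the BINDER_WRITE_READ ioctl:
 *
 *	struct binder_transaction_data tr = {
 *		.target.handle = handle,
 *		.code = FUNC_CODE,
 *		.flags = 0,		// synchronous; TF_ONE_WAY for async
 *		.data_size = size,
 *		.offsets_size = noffs * sizeof(binder_size_t),
 *		.data.ptr.buffer = (binder_uintptr_t)buf,
 *		.data.ptr.offsets = (binder_uintptr_t)offs,
 *	};
 *	struct { uint32_t cmd; struct binder_transaction_data tr; }
 *		__packed writebuf = { BC_TRANSACTION, tr };
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(writebuf),
 *		.write_buffer = (binder_uintptr_t)&writebuf,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */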
static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error.cmd == BR_OK) {
		int ret;

		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			const char *debug_string;
			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
			struct binder_ref_data rdata;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(uint32_t);
			ret = -1;
			if (increment && !target) {
				struct binder_node *ctx_mgr_node;
				mutex_lock(&context->context_mgr_node_lock);
				ctx_mgr_node = context->binder_context_mgr_node;
				if (ctx_mgr_node)
					ret = binder_inc_ref_for_node(
							proc, ctx_mgr_node,
							strong, NULL, &rdata);
				mutex_unlock(&context->context_mgr_node_lock);
			}
			if (ret)
				ret = binder_update_ref_for_handle(
						proc, target, increment, strong,
						&rdata);
			if (!ret && rdata.desc != target) {
				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
					proc->pid, thread->pid,
					target, rdata.desc);
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				break;
			case BC_RELEASE:
				debug_string = "Release";
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				break;
			}
			if (ret) {
				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
					proc->pid, thread->pid, debug_string,
					strong, target, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d\n",
				     proc->pid, thread->pid, debug_string,
				     rdata.debug_id, rdata.desc, rdata.strong,
				     rdata.weak);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;
			bool free_node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" :
					"BC_ACQUIRE_DONE",
					(u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					(u64)node_ptr, node->debug_id,
					(u64)cookie, (u64)node->cookie);
				binder_put_node(node);
				break;
			}
			binder_node_inner_lock(node);
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_weak_ref = 0;
			}
			free_node = binder_dec_node_nilocked(node,
					cmd == BC_ACQUIRE_DONE, 0);
			WARN_ON(free_node);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d tr %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs,
				     node->local_weak_refs, node->tmp_refs);
			binder_node_inner_unlock(node);
			binder_put_node(node);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_alloc_prepare_to_free(&proc->alloc,
							      data_ptr);
			if (IS_ERR_OR_NULL(buffer)) {
				if (PTR_ERR(buffer) == -EPERM) {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				} else {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				}
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");

			if (buffer->transaction) {
				buffer->transaction->buffer = NULL;
				buffer->transaction = NULL;
			}
			if (buffer->async_transaction && buffer->target_node) {
				struct binder_node *buf_node;
				struct binder_work *w;

				buf_node = buffer->target_node;
				binder_node_inner_lock(buf_node);
				BUG_ON(!buf_node->has_async_transaction);
				BUG_ON(buf_node->proc != proc);
				w = binder_dequeue_work_head_ilocked(
						&buf_node->async_todo);
				if (!w) {
					buf_node->has_async_transaction = false;
				} else {
					binder_enqueue_work_ilocked(
							w, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
				binder_node_inner_unlock(buf_node);
			}
			trace_binder_transaction_buffer_release(buffer);
			binder_transaction_buffer_release(proc, buffer, NULL);
			binder_alloc_free_buf(&proc->alloc, buffer);
			break;
		}

		case BC_TRANSACTION_SG:
		case BC_REPLY_SG: {
			struct binder_transaction_data_sg tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr.transaction_data,
					   cmd == BC_REPLY_SG, tr.buffers_size);
			break;
		}
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}

		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			binder_inner_proc_lock(proc);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			binder_inner_proc_unlock(proc);
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death = NULL;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/*
				 * Allocate memory for death notification
				 * before taking lock
				 */
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					WARN_ON(thread->return_error.cmd !=
						BR_OK);
					thread->return_error.cmd = BR_ERROR;
					binder_enqueue_thread_work(
						thread,
						&thread->return_error.work);
					binder_debug(
						BINDER_DEBUG_FAILED_TRANSACTION,
						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						proc->pid, thread->pid);
					break;
				}
			}
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				binder_proc_unlock(proc);
				kfree(death);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->data.debug_id,
				     ref->data.desc, ref->data.strong,
				     ref->data.weak, ref->node->debug_id);

			binder_node_lock(ref->node);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					kfree(death);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;

					binder_inner_proc_lock(proc);
					binder_enqueue_work_ilocked(
						&ref->death->work, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
					binder_inner_proc_unlock(proc);
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
						proc->pid, thread->pid,
						(u64)death->cookie,
						(u64)cookie);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				ref->death = NULL;
				binder_inner_proc_lock(proc);
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_thread_work_ilocked(
								thread,
								&death->work);
					else {
						binder_enqueue_work_ilocked(
								&death->work,
								&proc->todo);
						binder_wakeup_proc_ilocked(
								proc);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
				binder_inner_proc_unlock(proc);
			}
			binder_node_unlock(ref->node);
			binder_proc_unlock(proc);
		} break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			binder_inner_proc_lock(proc);
			list_for_each_entry(w, &proc->delivered_death,
					    entry) {
				struct binder_ref_death *tmp_death =
					container_of(w,
						     struct binder_ref_death,
						     work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
					proc->pid, thread->pid, (u64)cookie);
				binder_inner_proc_unlock(proc);
				break;
			}
			binder_dequeue_work_ilocked(&death->work);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper &
				    (BINDER_LOOPER_STATE_REGISTERED |
				     BINDER_LOOPER_STATE_ENTERED))
					binder_enqueue_thread_work_ilocked(
						thread, &death->work);
				else {
					binder_enqueue_work_ilocked(
							&death->work,
							&proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
			}
			binder_inner_proc_unlock(proc);
		} break;

		default:
			pr_err("%d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}
static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
	}
}
static int binder_put_node_cmd(struct binder_proc *proc,
			       struct binder_thread *thread,
			       void __user **ptrp,
			       binder_uintptr_t node_ptr,
			       binder_uintptr_t node_cookie,
			       int node_debug_id,
			       uint32_t cmd, const char *cmd_name)
{
	void __user *ptr = *ptrp;

	if (put_user(cmd, (uint32_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(uint32_t);

	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	binder_stat_br(proc, thread, cmd);
	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
		     proc->pid, thread->pid, cmd_name, node_debug_id,
		     (u64)node_ptr, (u64)node_cookie);

	*ptrp = ptr;
	return 0;
}
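/*
 * Usage (editorial sketch): binder_thread_read() calls this helper once
 * per needed node state transition (BR_INCREFS, BR_ACQUIRE, BR_RELEASE,
 * BR_DECREFS) while handling a BINDER_WORK_NODE item, e.g.:
 *
 *	ret = binder_put_node_cmd(proc, thread, &ptr, node_ptr, node_cookie,
 *				  node_debug_id, BR_INCREFS, "BR_INCREFS");
 *	if (ret)
 *		return ret;
 */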
static int binder_wait_for_work(struct binder_thread *thread,
				bool do_proc_work)
{
	DEFINE_WAIT(wait);
	struct binder_proc *proc = thread->proc;
	int ret = 0;

	freezer_do_not_count();
	binder_inner_proc_lock(proc);
	for (;;) {
		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
		if (binder_has_work_ilocked(thread, do_proc_work))
			break;
		if (do_proc_work)
			list_add(&thread->waiting_thread_node,
				 &proc->waiting_threads);
		binder_inner_proc_unlock(proc);
		schedule();
		binder_inner_proc_lock(proc);
		list_del_init(&thread->waiting_thread_node);
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&thread->wait, &wait);
	binder_inner_proc_unlock(proc);
	freezer_count();

	return ret;
}
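/*
 * Editorial note on the wait loop above: the prepare_to_wait()/
 * finish_wait() pair with a re-check of binder_has_work_ilocked() under
 * proc->inner_lock closes the race where work is queued between the check
 * and schedule(). The freezer_do_not_count()/freezer_count() bracket keeps
 * a blocked binder thread from holding up system suspend.
 */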
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
					  proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_restore_priority(current, proc->default_priority);
	}

	if (non_block) {
		if (!binder_has_work(thread, wait_for_proc_work))
			ret = -EAGAIN;
	} else {
		ret = binder_wait_for_work(thread, wait_for_proc_work);
	}

	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;
	while (1) {
		uint32_t cmd;
		struct binder_transaction_data_secctx tr;
		struct binder_transaction_data *trd = &tr.transaction_data;
		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;
		size_t trsize = sizeof(*trd);

		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			 wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);
		if (binder_worklist_empty_ilocked(&thread->todo))
			thread->process_todo = false;
		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			WARN_ON(e->cmd == BR_OK);
			binder_inner_proc_unlock(proc);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			cmd = e->cmd;
			e->cmd = BR_OK;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_inner_proc_unlock(proc);
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			int strong, weak;
			binder_uintptr_t node_ptr = node->ptr;
			binder_uintptr_t node_cookie = node->cookie;
			int node_debug_id = node->debug_id;
			int has_strong_ref;
			int has_weak_ref;
			void __user *orig_ptr = ptr;

			BUG_ON(proc != node->proc);
			strong = node->internal_strong_refs ||
					node->local_strong_refs;
			weak = !hlist_empty(&node->refs) ||
					node->local_weak_refs ||
					node->tmp_refs || strong;
			has_strong_ref = node->has_strong_ref;
			has_weak_ref = node->has_weak_ref;

			if (weak && !has_weak_ref) {
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			}
			if (strong && !has_strong_ref) {
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			}
			if (!strong && has_strong_ref)
				node->has_strong_ref = 0;
			if (!weak && has_weak_ref)
				node->has_weak_ref = 0;
			if (!weak && !strong) {
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx deleted\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_inner_proc_unlock(proc);
				binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
				binder_node_unlock(node);
				binder_free_node(node);
			} else
				binder_inner_proc_unlock(proc);

			if (weak && !has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_INCREFS, "BR_INCREFS");
			if (!ret && strong && !has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_ACQUIRE, "BR_ACQUIRE");
			if (!ret && !strong && has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_RELEASE, "BR_RELEASE");
			if (!ret && !weak && has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_DECREFS, "BR_DECREFS");
			if (orig_ptr == ptr)
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
			if (ret)
				return ret;
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			cookie = death->cookie;

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)cookie);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				binder_inner_proc_unlock(proc);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;
		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;
			struct binder_priority node_prio;

			trd->target.ptr = target_node->ptr;
			trd->cookie = target_node->cookie;
			node_prio.sched_policy = target_node->sched_policy;
			node_prio.prio = target_node->min_priority;
			binder_transaction_priority(current, t, node_prio,
						    target_node->inherit_rt);
			cmd = BR_TRANSACTION;
		} else {
			trd->target.ptr = 0;
			trd->cookie = 0;
			cmd = BR_REPLY;
		}
		trd->code = t->code;
		trd->flags = t->flags;
		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			trd->sender_pid = task_tgid_nr_ns(sender,
							  task_active_pid_ns(current));
		} else {
			trd->sender_pid = 0;
		}

		trd->data_size = t->buffer->data_size;
		trd->offsets_size = t->buffer->offsets_size;
		trd->data.ptr.buffer = (binder_uintptr_t)
			((uintptr_t)t->buffer->data +
			binder_alloc_get_user_buffer_offset(&proc->alloc));
		trd->data.ptr.offsets = trd->data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));

		tr.secctx = t->security_ctx;
		if (t->security_ctx) {
			cmd = BR_TRANSACTION_SEC_CTX;
			trsize = sizeof(tr);
		}
		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "put_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, trsize)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "copy_to_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += trsize;

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
				(cmd == BR_TRANSACTION_SEC_CTX) ?
				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)trd->data.ptr.buffer,
			     (u64)trd->data.ptr.offsets);

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}
done:

	*consumed = ptr - buffer;
	binder_inner_proc_lock(proc);
	if (proc->requested_threads == 0 &&
	    list_empty(&thread->proc->waiting_threads) &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED))
	     /* the user-space code fails to spawn a new thread
	      * if we leave this out */) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}
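
/*
 * Drain and free every work item still queued on @list; used when a
 * thread or process goes away with undelivered todo entries. Pending
 * transactions are failed with BR_DEAD_REPLY via
 * binder_cleanup_transaction().
 */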
static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;

	while (1) {
		w = binder_dequeue_work_head(proc, list);
		if (!w)
			return;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);

			binder_cleanup_transaction(t, "process died.",
						   BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_ERROR: %u\n",
				e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}
}
static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	get_task_struct(current);
	thread->task = current;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}
static void binder_free_proc(struct binder_proc *proc)
{
	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}

static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	put_task_struct(thread->task);
	kfree(thread);
}
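
/*
 * Detach @thread from @proc and unwind its transaction stack: the other
 * side of each in-flight transaction is unhooked, a pending reply (if
 * any) is failed with BR_DEAD_REPLY, and pollers are woken with POLLFREE
 * before the thread's tmp_ref is dropped. Returns the number of
 * transactions that were still active.
 */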
static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
	}

	/*
	 * If this thread used poll, make sure we remove the waitqueue
	 * from any epoll data structures holding it with POLLFREE.
	 * waitqueue_active() is safe to use here because we're holding
	 * the inner lock.
	 */
	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
	    waitqueue_active(&thread->wait)) {
		wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
	}

	binder_inner_proc_unlock(thread->proc);

	/*
	 * This is needed to avoid races between wake_up_poll() above and
	 * ep_remove_waitqueue() called for other reasons (eg the epoll file
	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
	 * lock, so we can be sure it's done after calling synchronize_rcu().
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		synchronize_rcu();

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}
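
/*
 * poll() support: a thread is readable when its own todo list (or the
 * process todo list, if the thread is available for process work) is
 * non-empty. The BINDER_LOOPER_STATE_POLL flag set here is what makes
 * binder_thread_release() do the POLLFREE/synchronize_rcu() dance above.
 */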
static unsigned int binder_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	bool wait_for_proc_work;

	thread = binder_get_thread(proc);
	if (!thread)
		return POLLERR;

	binder_inner_proc_lock(thread->proc);
	thread->looper |= BINDER_LOOPER_STATE_POLL;
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(thread->proc);

	poll_wait(filp, &thread->wait, wait);

	if (binder_has_work(thread, wait_for_proc_work))
		return POLLIN;

	return 0;
}
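
/*
 * Core of the BINDER_WRITE_READ ioctl: perform the write half first,
 * then the (possibly blocking) read half, updating the consumed counts
 * that are copied back to userspace. A minimal sketch of the userspace
 * call, assuming an fd opened on a binder device (illustrative only,
 * not part of this driver):
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)cmds,
 *		.write_size   = cmds_len,
 *		.read_buffer  = (binder_uintptr_t)returns,
 *		.read_size    = sizeof(returns),
 *	};
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 */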
static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
static int binder_ioctl_set_ctx_mgr(struct file *filp,
				    struct flat_binder_object *fbo)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, fbo);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}
static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
				struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;

	memset(info, 0, sizeof(*info));

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
			info->ptr = node->ptr;
			info->cookie = node->cookie;
			info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		int max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR_EXT: {
		struct flat_binder_object fbo;

		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
			ret = -EINVAL;
			goto err;
		}
		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
		if (ret)
			goto err;
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_DEBUG_INFO: {
		struct binder_node_debug_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}
static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}

static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
	vma->vm_flags &= ~VM_MAYWRITE;

	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
	if (ret)
		return ret;
	mutex_lock(&proc->files_lock);
	proc->files = get_files_struct(current);
	mutex_unlock(&proc->files_lock);
	return 0;

err_bad_arg:
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
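
/*
 * open() on a binder device: allocate the per-process binder_proc,
 * inherit a default scheduling priority from the opener, bind it to the
 * device's context, and expose a debugfs entry named after the pid.
 */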
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;
	struct binder_device *binder_dev;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	mutex_init(&proc->files_lock);
	INIT_LIST_HEAD(&proc->todo);
	if (binder_supported_policy(current->policy)) {
		proc->default_priority.sched_policy = current->policy;
		proc->default_priority.prio = current->normal_prio;
	} else {
		proc->default_priority.sched_policy = SCHED_NORMAL;
		proc->default_priority.prio = NICE_TO_PRIO(0);
	}

	binder_dev = container_of(filp->private_data, struct binder_device,
				  miscdev);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	INIT_LIST_HEAD(&proc->waiting_threads);
	filp->private_data = proc;

	mutex_lock(&binder_procs_lock);
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * anyway print all contexts that a given PID has, so this
		 * is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&binder_proc_fops);
	}

	return 0;
}
static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}
static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node.
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
		 * with new notification requests and the
		 * inner lock to synchronize with queued
		 * death notifications.
		 */
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;

		BUG_ON(!list_empty(&ref->death->work.entry));
		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);
	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}
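
/*
 * Final teardown of a binder_proc, run from the deferred workqueue after
 * release(): unregister it, drop a context-manager node it may own, then
 * release every thread, node, ref and undelivered work item before
 * dropping the tmp_ref that keeps the struct alive.
 */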
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	BUG_ON(proc->files);

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			mutex_lock(&proc->files_lock);
			files = proc->files;
			if (files)
				proc->files = NULL;
			mutex_unlock(&proc->files_lock);
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		if (files)
			put_files_struct(files);
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
				&binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}
static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;

	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority.sched_policy,
		   t->priority.prio, t->need_reply);
	spin_unlock(&t->lock);

	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %p\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->data);
}
static void print_binder_work_ilocked(struct seq_file *m,
				     struct binder_proc *proc,
				     const char *prefix,
				     const char *transaction_prefix,
				     struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}
static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
			thread->pid, thread->looper,
			thread->looper_need_return,
			atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}
static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->sched_policy, node->min_priority,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					"    pending async transaction", w);
	}
}
static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							struct binder_ref,
							rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}
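
/*
 * Samsung FREECESS hook: report to the freecess framework (via
 * cfb_report(), see <linux/freecess.h>) whether any process owned by a
 * given uid still has binder work pending, so that frozen apps with
 * outstanding binder traffic can be detected.
 */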
#ifdef CONFIG_SAMSUNG_FREECESS
static void binder_in_transaction(struct binder_proc *proc)
{
	struct rb_node *n = NULL;
	struct binder_thread *thread = NULL;
	int uid = -1;
	struct task_struct *tsk = NULL;
	struct binder_transaction *t = NULL;
	bool empty = true;
	bool found = false;

	//check binder threads' todo lists and transaction_stack
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		empty = binder_worklist_empty_ilocked(&thread->todo);
		tsk = thread->task;

		if (tsk != NULL && !empty) {
			//have some binder work to do:
			//report uid to FW, only report one time
			uid = tsk->cred->euid.val;
			binder_inner_proc_unlock(proc);
			cfb_report(uid, "thread");
			return;
		}

		//processing one binder call
		t = thread->transaction_stack;
		if (t) {
			spin_lock(&t->lock);
			if (t->to_thread == thread) {
				//check incoming, it has one
				found = true;
				uid = tsk->cred->euid.val;
			}
			spin_unlock(&t->lock);
			if (found) {
				//report uid to FW, only report one time
				binder_inner_proc_unlock(proc);
				cfb_report(uid, "transaction_stack");
				return;
			}
		}
	}

	//check binder proc todo list
	empty = binder_worklist_empty_ilocked(&proc->todo);
	tsk = proc->tsk;
	if (tsk != NULL && !empty) {
		//report uid to FW, only report one time
		uid = tsk->cred->euid.val;
		binder_inner_proc_unlock(proc);
		cfb_report(uid, "proc");
		return;
	}
	binder_inner_proc_unlock(proc);
}

void binders_in_transcation(int uid)
{
	struct binder_proc *itr;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr != NULL && (itr->tsk->cred->euid.val == uid)) {
			binder_in_transaction(itr);
		}
	}
	mutex_unlock(&binder_procs_lock);
}
#endif
static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}
static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}
= {
5908 .owner
= THIS_MODULE
,
5909 .poll
= binder_poll
,
5910 .unlocked_ioctl
= binder_ioctl
,
5911 .compat_ioctl
= binder_ioctl
,
5912 .mmap
= binder_mmap
,
5913 .open
= binder_open
,
5914 .flush
= binder_flush
,
5915 .release
= binder_release
,
5918 BINDER_DEBUG_ENTRY(state
);
5919 BINDER_DEBUG_ENTRY(stats
);
5920 BINDER_DEBUG_ENTRY(transactions
);
5921 BINDER_DEBUG_ENTRY(transaction_log
);
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}
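
/*
 * Module init: set up the allocator shrinker and debugfs files, then
 * register one misc device per name in the binder_devices_param list, a
 * comma-separated string taken from CONFIG_ANDROID_BINDER_DEVICES by
 * default (e.g. "binder,hwbinder,vndbinder").
 */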
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_names;
	struct binder_device *device;
	struct hlist_node *tmp;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	/*
	 * Copy the module_parameter string, because we don't want to
	 * tokenize it in-place.
	 */
	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}
	strcpy(device_names, binder_devices_param);

	while ((device_name = strsep(&device_names, ","))) {
		ret = init_binder_device(device_name);
		if (ret)
			goto err_init_binder_device_failed;
	}

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");