/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 */
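/*
 * Illustrative sketch only (not part of the driver, hence #if 0): a
 * hypothetical function showing the documented nesting order on a single
 * proc. The function name is an assumption for illustration; the lock
 * helpers are defined later in this file.
 */
#if 0
static void binder_lock_order_example(struct binder_proc *proc,
				      struct binder_node *node)
{
	binder_proc_lock(proc);		/* 1) proc->outer_lock */
	binder_node_lock(node);		/* 2) node->lock */
	binder_inner_proc_lock(proc);	/* 3) proc->inner_lock */
	/* ... critical section ... */
	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);
	binder_proc_unlock(proc);
}
#endif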
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include <uapi/linux/sched/types.h>
#include "binder_alloc.h"
#include "binder_trace.h"
static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;
static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
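/*
 * Usage sketch (illustrative only): both macros take printf-style
 * arguments and print only when the corresponding bit is set in
 * binder_debug_mask, e.g.:
 *
 *	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%d: open\n", current->pid);
 *	binder_user_error("%d: bad handle %u\n", some_pid, some_handle);
 *
 * (some_pid/some_handle are placeholder names for this example.)
 */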
#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;
static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}
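/*
 * Usage note: allocation and free sites pair these helpers, e.g.
 * binder_stats_created(BINDER_STAT_NODE) in binder_init_node_ilocked()
 * and binder_stats_deleted(BINDER_STAT_NODE) in binder_free_node(), so
 * obj_created[type] - obj_deleted[type] is the number of live objects
 * of that type.
 */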
struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;
static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
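/*
 * Example: with ARRAY_SIZE(log->entry) == 32, the 33rd add wraps back to
 * slot 0 and sets log->full, telling readers that older entries have
 * been overwritten; an entry with debug_id_done == 0 is still being
 * filled in.
 */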
struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;

	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};
/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};
/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @sched_policy:         minimum scheduling policy for node
 *                        (invariant after initialized)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @inherit_rt:           inherit RT scheduling policy from caller
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 sched_policy:2;
		u8 inherit_rt:1;
		u8 accept_fds:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};
struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};
/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};
/**
 * struct binder_ref - struct to track references on nodes
 * @data:        binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:  list entry for node->refs list in target node
 *               (protected by @node->lock)
 * @proc:        binder_proc containing ref
 * @node:        binder_node of target node. When cleaning up a
 *               ref for deletion in binder_cleanup_ref, a non-NULL
 *               @node indicates the node must be freed
 * @death:       pointer to death notification (ref_death) if requested
 *               (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};
enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};
/**
 * struct binder_priority - scheduler policy and priority
 * @sched_policy            scheduler policy
 * @prio                    [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
 *
 * The binder driver supports inheriting the following scheduler policies:
 * SCHED_NORMAL
 * SCHED_BATCH
 * SCHED_FIFO
 * SCHED_RR
 */
struct binder_priority {
	unsigned int sched_policy;
	int prio;
};
/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (protected by @files_lock)
 * @files_lock:           mutex to protect @files
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct mutex files_lock;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	struct binder_priority default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};
enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};
/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 * @task:                 struct task_struct for this thread
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
	struct task_struct *task;
};
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	struct binder_priority priority;
	struct binder_priority saved_priority;
	bool set_priority_called;
	kuid_t sender_euid;
	/**
	 * @lock:  protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}
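/*
 * Note on the macro wrappers: each lock helper is a macro expanding to
 * _binder_*_lock(..., __LINE__), so BINDER_DEBUG_SPINLOCKS traces report
 * the caller's source line rather than the line inside the helper. The
 * same pattern is used for all of the lock/unlock helpers below.
 */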
/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc being released
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}
/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}
/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc being released
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}
/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}
/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node being released
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}
/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
}
/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node being released
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	spin_unlock(&node->lock);
}
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}
/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}
/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}
/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	binder_enqueue_work_ilocked(work, &thread->todo);
}
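/*
 * Contrast with binder_enqueue_thread_work_ilocked() below: that helper
 * additionally sets thread->process_todo, so a thread sleeping in read
 * is actually woken to handle the queued work.
 */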
/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}
/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}
/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}
static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}
/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	unsigned long rlim_cur;
	unsigned long irqs;
	int ret;

	mutex_lock(&proc->files_lock);
	if (proc->files == NULL) {
		ret = -ESRCH;
		goto err;
	}
	if (!lock_task_sighand(proc->tsk, &irqs)) {
		ret = -EMFILE;
		goto err;
	}
	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
err:
	mutex_unlock(&proc->files_lock);
	return ret;
}
/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	mutex_lock(&proc->files_lock);
	if (proc->files)
		__fd_install(proc->files, fd, file);
	mutex_unlock(&proc->files_lock);
}
/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	mutex_lock(&proc->files_lock);
	if (proc->files == NULL) {
		retval = -ESRCH;
		goto err;
	}
	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;
err:
	mutex_unlock(&proc->files_lock);
	return retval;
}
static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}
static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}
static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}
static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}
/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}
/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}
static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
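/*
 * binder_wakeup_proc_ilocked() is the select-then-wake pattern described
 * above: take one thread off waiting_threads (so only we can wake it)
 * and wake it, falling back to waking all pollers when no thread is
 * waiting.
 */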
static bool is_rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static bool is_fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static bool binder_supported_policy(int policy)
{
	return is_fair_policy(policy) || is_rt_policy(policy);
}
static int to_userspace_prio(int policy, int kernel_priority)
{
	if (is_fair_policy(policy))
		return PRIO_TO_NICE(kernel_priority);
	else
		return MAX_USER_RT_PRIO - 1 - kernel_priority;
}

static int to_kernel_prio(int policy, int user_priority)
{
	if (is_fair_policy(policy))
		return NICE_TO_PRIO(user_priority);
	else
		return MAX_USER_RT_PRIO - 1 - user_priority;
}
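/*
 * Worked example (values from the core kernel's priority maps, where
 * NICE_TO_PRIO(0) == 120 and MAX_USER_RT_PRIO == 100): for SCHED_NORMAL,
 * kernel prio 120 maps to nice 0 and back. For an RT policy, kernel
 * prio 98 maps to userspace rtprio 100 - 1 - 98 == 1; the RT mapping is
 * its own inverse.
 */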
static void binder_do_set_priority(struct task_struct *task,
				   struct binder_priority desired,
				   bool verify)
{
	int priority; /* user-space prio value */
	bool has_cap_nice;
	unsigned int policy = desired.sched_policy;

	if (task->policy == policy && task->normal_prio == desired.prio)
		return;

	has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);

	priority = to_userspace_prio(policy, desired.prio);

	if (verify && is_rt_policy(policy) && !has_cap_nice) {
		long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);

		if (max_rtprio == 0) {
			policy = SCHED_NORMAL;
			priority = MIN_NICE;
		} else if (priority > max_rtprio) {
			priority = max_rtprio;
		}
	}

	if (verify && is_fair_policy(policy) && !has_cap_nice) {
		long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));

		if (min_nice > MAX_NICE) {
			binder_user_error("%d RLIMIT_NICE not set\n",
					  task->pid);
			return;
		} else if (priority < min_nice) {
			priority = min_nice;
		}
	}

	if (policy != desired.sched_policy ||
	    to_kernel_prio(policy, priority) != desired.prio)
		binder_debug(BINDER_DEBUG_PRIORITY_CAP,
			     "%d: priority %d not allowed, using %d instead\n",
			      task->pid, desired.prio,
			      to_kernel_prio(policy, priority));

	trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
				  to_kernel_prio(policy, priority),
				  desired.prio);

	/* Set the actual priority */
	if (task->policy != policy || is_rt_policy(policy)) {
		struct sched_param params;

		params.sched_priority = is_rt_policy(policy) ? priority : 0;

		sched_setscheduler_nocheck(task,
					   policy | SCHED_RESET_ON_FORK,
					   &params);
	}
	if (is_fair_policy(policy))
		set_user_nice(task, priority);
}
static void binder_set_priority(struct task_struct *task,
				struct binder_priority desired)
{
	binder_do_set_priority(task, desired, /* verify = */ true);
}

static void binder_restore_priority(struct task_struct *task,
				    struct binder_priority desired)
{
	binder_do_set_priority(task, desired, /* verify = */ false);
}
static void binder_transaction_priority(struct task_struct *task,
					struct binder_transaction *t,
					struct binder_priority node_prio,
					bool inherit_rt)
{
	struct binder_priority desired_prio = t->priority;

	if (t->set_priority_called)
		return;

	t->set_priority_called = true;
	t->saved_priority.sched_policy = task->policy;
	t->saved_priority.prio = task->normal_prio;

	if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
		desired_prio.prio = NICE_TO_PRIO(0);
		desired_prio.sched_policy = SCHED_NORMAL;
	}

	if (node_prio.prio < t->priority.prio ||
	    (node_prio.prio == t->priority.prio &&
	     node_prio.sched_policy == SCHED_FIFO)) {
		/*
		 * In case the minimum priority on the node is
		 * higher (lower value), use that priority. If
		 * the priority is the same, but the node uses
		 * SCHED_FIFO, prefer SCHED_FIFO, since it can
		 * run unbounded, unlike SCHED_RR.
		 */
		desired_prio = node_prio;
	}

	binder_set_priority(task, desired_prio);
}
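/*
 * Example (values illustrative): if t->priority is {SCHED_NORMAL, prio
 * 130} and the target node advertises a minimum of {SCHED_NORMAL, prio
 * 120}, the node's value wins (a lower prio value means a higher
 * priority) and the target task is set to prio 120 for the transaction.
 */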
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}
static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}
static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;
	s8 priority;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
		FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
	node->min_priority = to_kernel_prio(node->sched_policy, priority);
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}
static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			binder_dequeue_work_ilocked(&node->work);
			/*
			 * Note: this function is the only place where we queue
			 * directly to a thread->todo without using the
			 * corresponding binder_enqueue_thread_work() helper
			 * functions; in this case it's ok to not set the
			 * process_todo flag, since we know this node work will
			 * always be followed by other work that starts queue
			 * processing: in case of synchronous transactions, a
			 * BR_REPLY or BR_ERROR; in case of oneway
			 * transactions, a BR_TRANSACTION_COMPLETE.
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}
static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}
static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}
static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}
/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}
/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}
static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}
/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		      node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
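/*
 * Example: if the proc already holds descs {0, 1, 2, 4}, the scan above
 * assigns new_ref->data.desc = 3, the smallest unused handle, starting
 * from 1 (desc 0 is reserved for the context manager node).
 */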
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->data.debug_id,
			      ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}
/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}
/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}
/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}
/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}
/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}
/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}
/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}
/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}
/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from)
		return NULL;
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
	return NULL;
}
static void binder_free_transaction(struct binder_transaction *t)
{
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}
/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			"undelivered transaction %d, %s\n",
			t->debug_id, reason);
		binder_free_transaction(t);
	}
}
/**
 * binder_validate_object() - checks for a valid metadata object in a buffer.
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the buffer at which to validate an object.
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero.
 */
static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
{
	/* Check if we can read a header first */
	struct binder_object_header *hdr;
	size_t object_size = 0;

	if (offset > buffer->data_size - sizeof(*hdr) ||
	    buffer->data_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	/* Ok, now see if we can read a complete object. */
	hdr = (struct binder_object_header *)(buffer->data + offset);
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
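/*
 * Note on the bounds checks above (illustrative): offset and data_size
 * are unsigned, so a naive "offset + object_size > data_size" test could
 * wrap. The checks are therefore written as
 *
 *	offset > data_size - object_size
 *
 * guarded by "data_size < object_size" so the subtraction can never
 * underflow. For example, with data_size = 4 and an 8-byte header, the
 * unguarded subtraction would produce a huge unsigned value and let a
 * short buffer pass; the extra guard rejects it first.
 */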
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b:		binder_buffer containing the object
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start:	points to the start of the offset array
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 */
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
							binder_size_t index,
							binder_size_t *start,
							binder_size_t num_valid)
{
	struct binder_buffer_object *buffer_obj;
	binder_size_t *offp;

	if (index >= num_valid)
		return NULL;

	offp = start + index;
	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
		return NULL;

	return buffer_obj;
}
/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b:			transaction buffer
 * @objects_start:	start of objects buffer
 * @buffer:		binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @buffer to fix up
 * @last_obj:		last binder_buffer_object that we fixed up in
 * @last_min_offset:	minimum fixup offset in @last_obj
 *
 * Return:		%true if a fixup in buffer @buffer at offset
 *			@fixup_offset is allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_buffer *b,
				  binder_size_t *objects_start,
				  struct binder_buffer_object *buffer,
				  binder_size_t fixup_offset,
				  struct binder_buffer_object *last_obj,
				  binder_size_t last_min_offset)
{
	if (!last_obj) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj != buffer) {
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
		last_obj = (struct binder_buffer_object *)
			(b->data + *(objects_start + last_obj->parent));
	}
	return (fixup_offset >= last_min_offset);
}
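/*
 * Worked example (illustrative): validating E's fixup in the allowed
 * hierarchy above, where buffer == A (E's parent), fixup_offset == 32
 * and last_obj == D. The loop walks up the parent chain: from D the
 * minimum becomes D.parent_offset + sizeof(uintptr_t) and last_obj
 * becomes C; from C it becomes C.parent_offset + sizeof(uintptr_t)
 * (16 + 8 on a 64-bit kernel) and last_obj becomes A. The loop stops at
 * A, and 32 >= 24 passes -- which is exactly the "increasing offsets
 * within a parent" rule the comment describes.
 */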
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_start, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %pK\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start = (binder_size_t *)(buffer->data +
				      ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)off_start + buffer->offsets_size;
	for (offp = off_start; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(buffer, *offp);

		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		hdr = (struct binder_object_header *)(buffer->data + *offp);
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);

			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->fd);
			if (failed_at)
				task_close_fd(proc, fp->fd);
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			uintptr_t parent_buffer;
			u32 *fd_array;
			size_t fd_index;
			binder_size_t fd_buf_size;

			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(buffer, fda->parent,
						     off_start,
						     offp - off_start);
			if (!parent) {
				pr_err("transaction release %d bad parent offset",
				       debug_id);
				continue;
			}
			/*
			 * Since the parent was already fixed up, convert it
			 * back to kernel address space to access it
			 */
			parent_buffer = parent->buffer -
				binder_alloc_get_user_buffer_offset(
						&proc->alloc);
			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
				task_close_fd(proc, fd_array[fd_index]);
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}
static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}
static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}
static int binder_translate_fd(int fd,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	int target_fd;
	struct file *file;
	int ret;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
	if (target_fd < 0) {
		ret = -ENOMEM;
		goto err_get_unused_fd;
	}
	task_fd_install(target_proc, target_fd, file);
	trace_binder_transaction_fd(t, fd, target_fd);
	binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
		     fd, target_fd);

	return target_fd;

err_get_unused_fd:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}
static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size, num_installed_fds;
	int target_fd;
	uintptr_t parent_buffer;
	u32 *fd_array;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * Since the parent was already fixed up, convert it
	 * back to the kernel address space to access it
	 */
	parent_buffer = parent->buffer -
		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
	fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
						in_reply_to);
		if (target_fd < 0)
			goto err_translate_fd_failed;
		fd_array[fdi] = target_fd;
	}
	return 0;

err_translate_fd_failed:
	/*
	 * Failed to allocate fd or security error, free fds
	 * installed so far.
	 */
	num_installed_fds = fdi;
	for (fdi = 0; fdi < num_installed_fds; fdi++)
		task_close_fd(target_proc, fd_array[fdi]);
	return target_fd;
}
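/*
 * Note on the guard above (illustrative): fd_buf_size is computed as
 * sizeof(u32) * num_fds before num_fds has been validated, so the
 * "num_fds >= SIZE_MAX / sizeof(u32)" check is the standard
 * division-based idiom for rejecting any count whose byte size could
 * wrap the native size range. Only after that check can the
 * comparisons against parent->length be trusted not to operate on a
 * wrapped, artificially small fd_buf_size.
 */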
static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t *off_start,
			       binder_size_t num_valid,
			       struct binder_buffer_object *last_fixup_obj,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	u8 *parent_buffer;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(b, off_start,
				   parent, bp->parent_offset,
				   last_fixup_obj,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	parent_buffer = (u8 *)((uintptr_t)parent->buffer -
			binder_alloc_get_user_buffer_offset(
				&target_proc->alloc));
	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;

	return 0;
}
/**
 * binder_proc_transaction() - sends a transaction to a process and wakes it up
 * @t:		transaction to send
 * @proc:	process to send the transaction to
 * @thread:	thread in @proc to send the transaction to (may be NULL)
 *
 * This function queues a transaction to the specified process. It will try
 * to find a thread in the target process to handle the transaction and
 * wake it up. If no thread is found, the work is queued to the proc
 * waitqueue.
 *
 * If the @thread parameter is not NULL, the transaction is always queued
 * to the waitlist of that specific thread.
 *
 * Return:	true if the transaction was successfully queued
 *		false if the target process or thread is dead
 */
static bool binder_proc_transaction(struct binder_transaction *t,
				    struct binder_proc *proc,
				    struct binder_thread *thread)
{
	struct binder_node *node = t->buffer->target_node;
	struct binder_priority node_prio;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool pending_async = false;

	BUG_ON(!node);
	binder_node_lock(node);
	node_prio.prio = node->min_priority;
	node_prio.sched_policy = node->sched_policy;

	if (oneway) {
		BUG_ON(thread);
		if (node->has_async_transaction) {
			pending_async = true;
		} else {
			node->has_async_transaction = 1;
		}
	}

	binder_inner_proc_lock(proc);

	if (proc->is_dead || (thread && thread->is_dead)) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		return false;
	}

	if (!thread && !pending_async)
		thread = binder_select_thread_ilocked(proc);

	if (thread) {
		binder_transaction_priority(thread->task, t, node_prio,
					    node->inherit_rt);
		binder_enqueue_thread_work_ilocked(thread, &t->work);
	} else if (!pending_async) {
		binder_enqueue_work_ilocked(&t->work, &proc->todo);
	} else {
		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
	}

	if (!pending_async)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);

	return true;
}
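/*
 * Queueing summary for the function above (illustrative): with T the
 * thread chosen (either the caller-supplied @thread or one selected
 * from the waiting-thread list), the destination of t->work is roughly:
 *
 *	if (T)			-> T->todo	     (wake that thread)
 *	else if (!pending_async) -> proc->todo	     (wake the process)
 *	else			-> node->async_todo (delivered later)
 *
 * Only the first two cases wake anyone; a pending async transaction
 * stays parked on the node until BC_FREE_BUFFER of the previous async
 * buffer moves the next one to proc->todo (see binder_thread_write()).
 */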
/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:	struct binder_node for which to get refs
 * @procp:	returns @node->proc if valid
 * @error:	if no @procp then returns BR_DEAD_REPLY
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken or NULL if the @node->proc is NULL.
 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
 * target proc has died, @error is set to BR_DEAD_REPLY.
 */
static struct binder_node *binder_get_node_refs_for_txn(
		struct binder_node *node,
		struct binder_proc **procp,
		uint32_t *error)
{
	struct binder_node *target_node = NULL;

	binder_node_inner_lock(node);
	if (node->proc) {
		target_node = node;
		binder_inc_node_nilocked(node, 1, 0, NULL);
		binder_inc_node_tmpref_ilocked(node);
		node->proc->tmp_ref++;
		*procp = node->proc;
	} else
		*error = BR_DEAD_REPLY;
	binder_node_inner_unlock(node);

	return target_node;
}
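/*
 * Illustrative usage (a sketch, mirroring binder_transaction() below):
 *
 *	uint32_t err = BR_OK;
 *	struct binder_proc *target_proc = NULL;
 *	struct binder_node *target_node =
 *		binder_get_node_refs_for_txn(ref->node, &target_proc, &err);
 *
 *	if (!target_node)
 *		return err;	// BR_DEAD_REPLY: target process has died
 *	...
 *	binder_dec_node(target_node, 1, 0);	// undo the strong inc
 *	binder_dec_node_tmpref(target_node);	// undo the node tmpref
 *	binder_proc_dec_tmpref(target_proc);	// undo proc->tmp_ref
 *
 * All three references taken by the helper must be dropped on failure,
 * which is what the error ladder at the end of binder_transaction()
 * does; on success the strong ref is handed to the buffer and dropped
 * at buffer-release time instead.
 */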
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end, *off_start;
	binder_size_t off_min;
	u8 *sg_bufp, *sg_buf_end;
	struct binder_proc *target_proc = NULL;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error = 0;
	uint32_t return_error_param = 0;
	uint32_t return_error_line = 0;
	struct binder_buffer_object *last_fixup_obj = NULL;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	int t_debug_id = atomic_inc_return(&binder_last_id);

	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	e->context_name = proc->context->name;

	if (reply) {
		binder_inner_proc_lock(proc);
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_inner_proc_unlock(proc);
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_empty_call_stack;
		}
		if (in_reply_to->to_thread != thread) {
			spin_lock(&in_reply_to->lock);
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			spin_unlock(&in_reply_to->lock);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		binder_inner_proc_unlock(proc);
		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			binder_inner_proc_unlock(target_thread->proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
		target_proc->tmp_ref++;
		binder_inner_proc_unlock(target_thread->proc);
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, tr->target.handle,
						     true);
			if (ref) {
				target_node = binder_get_node_refs_for_txn(
						ref->node, &target_proc,
						&return_error);
			} else {
				binder_user_error("%d:%d got transaction to invalid handle\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
			}
			binder_proc_unlock(proc);
		} else {
			mutex_lock(&context->context_mgr_node_lock);
			target_node = context->binder_context_mgr_node;
			if (target_node)
				target_node = binder_get_node_refs_for_txn(
						target_node, &target_proc,
						&return_error);
			else
				return_error = BR_DEAD_REPLY;
			mutex_unlock(&context->context_mgr_node_lock);
			if (target_node && target_proc == proc) {
				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_invalid_target_handle;
			}
		}
		if (!target_node) {
			/*
			 * return_error is set above
			 */
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		e->to_node = target_node->debug_id;
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPERM;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		binder_inner_proc_lock(proc);
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				spin_lock(&tmp->lock);
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				spin_unlock(&tmp->lock);
				binder_inner_proc_unlock(proc);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EPROTO;
				return_error_line = __LINE__;
				goto err_bad_call_stack;
			}
			while (tmp) {
				struct binder_thread *from;

				spin_lock(&tmp->lock);
				from = tmp->from;
				if (from && from->proc == target_proc) {
					atomic_inc(&from->tmp_ref);
					target_thread = from;
					spin_unlock(&tmp->lock);
					break;
				}
				spin_unlock(&tmp->lock);
				tmp = tmp->from_parent;
			}
		}
		binder_inner_proc_unlock(proc);
	}
	if (target_thread)
		e->to_thread = target_thread->pid;
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);
	spin_lock_init(&t->lock);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	if (!(t->flags & TF_ONE_WAY) &&
	    binder_supported_policy(current->policy)) {
		/* Inherit supported policies for synchronous transactions */
		t->priority.sched_policy = current->policy;
		t->priority.prio = current->normal_prio;
	} else {
		/* Otherwise, fall back to the default priority */
		t->priority = target_proc->default_priority;
	}

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
	if (IS_ERR(t->buffer)) {
		/*
		 * -ESRCH indicates VMA cleared. The target is dying.
		 */
		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	off_start = (binder_size_t *)(t->buffer->data +
				      ALIGN(tr->data_size, sizeof(void *)));
	offp = off_start;

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	off_end = (void *)off_start + tr->offsets_size;
	sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
	sg_buf_end = sg_bufp + extra_buffers_size;
	off_min = 0;
	for (; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(t->buffer, *offp);

		if (object_size == 0 || *offp < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid, (u64)*offp,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
		off_min = *offp + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			int target_fd = binder_translate_fd(fp->fd, t, thread,
							    in_reply_to);

			if (target_fd < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = target_fd;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			fp->pad_binder = 0;
			fp->fd = target_fd;
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			struct binder_buffer_object *parent =
				binder_validate_ptr(t->buffer, fda->parent,
						    off_start,
						    offp - off_start);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(t->buffer, off_start,
						   parent, fda->parent_offset,
						   last_fixup_obj,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(fda, parent, t, thread,
							in_reply_to);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = parent;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end - sg_bufp;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			if (copy_from_user(sg_bufp,
					   (const void __user *)(uintptr_t)
					   bp->buffer, bp->length)) {
				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
						  proc->pid, thread->pid);
				return_error_param = -EFAULT;
				return_error = BR_FAILED_REPLY;
				return_error_line = __LINE__;
				goto err_copy_data_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)sg_bufp +
				binder_alloc_get_user_buffer_offset(
						&target_proc->alloc);
			sg_bufp += ALIGN(bp->length, sizeof(u64));

			ret = binder_fixup_parent(t, thread, bp, off_start,
						  offp - off_start,
						  last_fixup_obj,
						  last_fixup_min_off);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = bp;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_object_type;
		}
	}
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	t->work.type = BINDER_WORK_TRANSACTION;

	if (reply) {
		binder_enqueue_thread_work(thread, tcomplete);
		binder_inner_proc_lock(target_proc);
		if (target_thread->is_dead) {
			binder_inner_proc_unlock(target_proc);
			goto err_dead_proc_or_thread;
		}
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction_ilocked(target_thread, in_reply_to);
		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
		binder_inner_proc_unlock(target_proc);
		wake_up_interruptible_sync(&target_thread->wait);
		binder_restore_priority(current, in_reply_to->saved_priority);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_inner_proc_lock(proc);
		/*
		 * Defer the TRANSACTION_COMPLETE, so we don't return to
		 * userspace immediately; this allows the target process to
		 * immediately start processing this transaction, reducing
		 * latency. We will then return the TRANSACTION_COMPLETE when
		 * the target replies (or there is an error).
		 */
		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
		binder_inner_proc_unlock(proc);
		if (!binder_proc_transaction(t, target_proc, target_thread)) {
			binder_inner_proc_lock(proc);
			binder_pop_transaction_ilocked(thread, t);
			binder_inner_proc_unlock(proc);
			goto err_dead_proc_or_thread;
		}
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		binder_enqueue_thread_work(thread, tcomplete);
		if (!binder_proc_transaction(t, target_proc, NULL))
			goto err_dead_proc_or_thread;
	}
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	binder_proc_dec_tmpref(target_proc);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
	return;

err_dead_proc_or_thread:
	return_error = BR_DEAD_REPLY;
	return_error_line = __LINE__;
	binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, t->buffer, offp);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	target_node = NULL;
	t->buffer->transaction = NULL;
	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	if (target_proc)
		binder_proc_dec_tmpref(target_proc);
	if (target_node) {
		binder_dec_node(target_node, 1, 0);
		binder_dec_node_tmpref(target_node);
	}

	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
		     proc->pid, thread->pid, return_error, return_error_param,
		     (u64)tr->data_size, (u64)tr->offsets_size,
		     return_error_line);

	{
		struct binder_transaction_log_entry *fe;

		e->return_error = return_error;
		e->return_error_param = return_error_param;
		e->return_error_line = return_error_line;
		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
		smp_wmb();
		WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	}

	BUG_ON(thread->return_error.cmd != BR_OK);
	if (in_reply_to) {
		binder_restore_priority(current, in_reply_to->saved_priority);
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
		thread->return_error.cmd = return_error;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
	}
}
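/*
 * Protocol sketch (illustrative, from the sender's point of view): a
 * synchronous call through the function above produces this
 * command/return interleaving, with BR_TRANSACTION_COMPLETE deferred
 * as described in the inline comment:
 *
 *	sender:  BC_TRANSACTION  ------------------>  (queued to target)
 *	target:                 <- BR_TRANSACTION     (handles the call)
 *	target:  BC_REPLY       ------------------>
 *	sender:                 <- BR_TRANSACTION_COMPLETE
 *	sender:                 <- BR_REPLY
 *
 * An error at any stage is reported through thread->return_error or
 * binder_send_failed_reply() instead of BR_REPLY.
 */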
static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error.cmd == BR_OK) {
		int ret;

		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			const char *debug_string;
			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
			struct binder_ref_data rdata;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(uint32_t);
			ret = -1;
			if (increment && !target) {
				struct binder_node *ctx_mgr_node;
				mutex_lock(&context->context_mgr_node_lock);
				ctx_mgr_node = context->binder_context_mgr_node;
				if (ctx_mgr_node)
					ret = binder_inc_ref_for_node(
							proc, ctx_mgr_node,
							strong, NULL, &rdata);
				mutex_unlock(&context->context_mgr_node_lock);
			}
			if (ret)
				ret = binder_update_ref_for_handle(
						proc, target, increment, strong,
						&rdata);
			if (!ret && rdata.desc != target) {
				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
					proc->pid, thread->pid,
					target, rdata.desc);
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				break;
			case BC_RELEASE:
				debug_string = "Release";
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				break;
			}
			if (ret) {
				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
					proc->pid, thread->pid, debug_string,
					strong, target, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d\n",
				     proc->pid, thread->pid, debug_string,
				     rdata.debug_id, rdata.desc, rdata.strong,
				     rdata.weak);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;
			bool free_node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" :
					"BC_ACQUIRE_DONE",
					(u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					(u64)node_ptr, node->debug_id,
					(u64)cookie, (u64)node->cookie);
				binder_put_node(node);
				break;
			}
			binder_node_inner_lock(node);
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_weak_ref = 0;
			}
			free_node = binder_dec_node_nilocked(node,
					cmd == BC_ACQUIRE_DONE, 0);
			WARN_ON(free_node);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d tr %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs,
				     node->local_weak_refs, node->tmp_refs);
			binder_node_inner_unlock(node);
			binder_put_node(node);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_alloc_prepare_to_free(&proc->alloc,
							      data_ptr);
			if (buffer == NULL) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			if (!buffer->allow_user_free) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");

			if (buffer->transaction) {
				buffer->transaction->buffer = NULL;
				buffer->transaction = NULL;
			}
			if (buffer->async_transaction && buffer->target_node) {
				struct binder_node *buf_node;
				struct binder_work *w;

				buf_node = buffer->target_node;
				binder_node_inner_lock(buf_node);
				BUG_ON(!buf_node->has_async_transaction);
				BUG_ON(buf_node->proc != proc);
				w = binder_dequeue_work_head_ilocked(
						&buf_node->async_todo);
				if (!w) {
					buf_node->has_async_transaction = 0;
				} else {
					binder_enqueue_work_ilocked(
							w, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
				binder_node_inner_unlock(buf_node);
			}
			trace_binder_transaction_buffer_release(buffer);
			binder_transaction_buffer_release(proc, buffer, NULL);
			binder_alloc_free_buf(&proc->alloc, buffer);
			break;
		}

		case BC_TRANSACTION_SG:
		case BC_REPLY_SG: {
			struct binder_transaction_data_sg tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr.transaction_data,
					   cmd == BC_REPLY_SG, tr.buffers_size);
			break;
		}
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}

		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			binder_inner_proc_lock(proc);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			binder_inner_proc_unlock(proc);
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death = NULL;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/*
				 * Allocate memory for death notification
				 * before taking lock
				 */
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					WARN_ON(thread->return_error.cmd !=
						BR_OK);
					thread->return_error.cmd = BR_ERROR;
					binder_enqueue_thread_work(
						thread,
						&thread->return_error.work);
					binder_debug(
						BINDER_DEBUG_FAILED_TRANSACTION,
						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						proc->pid, thread->pid);
					break;
				}
			}
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				binder_proc_unlock(proc);
				kfree(death);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->data.debug_id,
				     ref->data.desc, ref->data.strong,
				     ref->data.weak, ref->node->debug_id);

			binder_node_lock(ref->node);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					kfree(death);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;

					binder_inner_proc_lock(proc);
					binder_enqueue_work_ilocked(
						&ref->death->work, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
					binder_inner_proc_unlock(proc);
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
						proc->pid, thread->pid,
						(u64)death->cookie,
						(u64)cookie);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				ref->death = NULL;
				binder_inner_proc_lock(proc);
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_thread_work_ilocked(
								thread,
								&death->work);
					else {
						binder_enqueue_work_ilocked(
								&death->work,
								&proc->todo);
						binder_wakeup_proc_ilocked(
								proc);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
				binder_inner_proc_unlock(proc);
			}
			binder_node_unlock(ref->node);
			binder_proc_unlock(proc);
		} break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			binder_inner_proc_lock(proc);
			list_for_each_entry(w, &proc->delivered_death,
					    entry) {
				struct binder_ref_death *tmp_death =
					container_of(w,
						     struct binder_ref_death,
						     work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
					proc->pid, thread->pid, (u64)cookie);
				binder_inner_proc_unlock(proc);
				break;
			}
			binder_dequeue_work_ilocked(&death->work);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper &
				    (BINDER_LOOPER_STATE_REGISTERED |
				     BINDER_LOOPER_STATE_ENTERED))
					binder_enqueue_thread_work_ilocked(
						thread, &death->work);
				else {
					binder_enqueue_work_ilocked(
							&death->work,
							&proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
			}
			binder_inner_proc_unlock(proc);
		} break;

		default:
			pr_err("%d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}
static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
	}
}
static int binder_put_node_cmd(struct binder_proc *proc,
			       struct binder_thread *thread,
			       void __user **ptrp,
			       binder_uintptr_t node_ptr,
			       binder_uintptr_t node_cookie,
			       int node_debug_id,
			       uint32_t cmd, const char *cmd_name)
{
	void __user *ptr = *ptrp;

	if (put_user(cmd, (uint32_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(uint32_t);

	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	binder_stat_br(proc, thread, cmd);
	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
		     proc->pid, thread->pid, cmd_name, node_debug_id,
		     (u64)node_ptr, (u64)node_cookie);

	*ptrp = ptr;
	return 0;
}
static int binder_wait_for_work(struct binder_thread *thread,
				bool do_proc_work)
{
	DEFINE_WAIT(wait);
	struct binder_proc *proc = thread->proc;
	int ret = 0;

	freezer_do_not_count();
	binder_inner_proc_lock(proc);
	for (;;) {
		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
		if (binder_has_work_ilocked(thread, do_proc_work))
			break;
		if (do_proc_work)
			list_add(&thread->waiting_thread_node,
				 &proc->waiting_threads);
		binder_inner_proc_unlock(proc);
		schedule();
		binder_inner_proc_lock(proc);
		list_del_init(&thread->waiting_thread_node);
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&thread->wait, &wait);
	binder_inner_proc_unlock(proc);
	freezer_count();

	return ret;
}
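/*
 * Illustrative sketch of the wait pattern used above (a generic kernel
 * idiom, not additional driver code): prepare_to_wait()/schedule()/
 * finish_wait() with the condition re-checked under the lock:
 *
 *	DEFINE_WAIT(wait);
 *
 *	lock();
 *	for (;;) {
 *		prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
 *		if (condition())
 *			break;
 *		unlock();
 *		schedule();
 *		lock();
 *		if (signal_pending(current)) {
 *			ret = -ERESTARTSYS;
 *			break;
 *		}
 *	}
 *	finish_wait(&wq, &wait);
 *	unlock();
 *
 * Setting the task state before the final condition check closes the
 * lost-wakeup window between testing the condition and going to sleep.
 */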
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
					  proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_restore_priority(current, proc->default_priority);
	}

	if (non_block) {
		if (!binder_has_work(thread, wait_for_proc_work))
			ret = -EAGAIN;
	} else {
		ret = binder_wait_for_work(thread, wait_for_proc_work);
	}

	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;

		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			   wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);
		if (binder_worklist_empty_ilocked(&thread->todo))
			thread->process_todo = false;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			WARN_ON(e->cmd == BR_OK);
			binder_inner_proc_unlock(proc);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, e->cmd);
			e->cmd = BR_OK;
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_inner_proc_unlock(proc);
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			int strong, weak;
			binder_uintptr_t node_ptr = node->ptr;
			binder_uintptr_t node_cookie = node->cookie;
			int node_debug_id = node->debug_id;
			int has_weak_ref;
			int has_strong_ref;
			void __user *orig_ptr = ptr;

			BUG_ON(proc != node->proc);
			strong = node->internal_strong_refs ||
					node->local_strong_refs;
			weak = !hlist_empty(&node->refs) ||
					node->local_weak_refs ||
					node->tmp_refs || strong;
			has_strong_ref = node->has_strong_ref;
			has_weak_ref = node->has_weak_ref;

			if (weak && !has_weak_ref) {
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			}
			if (strong && !has_strong_ref) {
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			}
			if (!strong && has_strong_ref)
				node->has_strong_ref = 0;
			if (!weak && has_weak_ref)
				node->has_weak_ref = 0;
			if (!weak && !strong) {
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx deleted\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_inner_proc_unlock(proc);
				binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
				binder_node_unlock(node);
				binder_free_node(node);
			} else
				binder_inner_proc_unlock(proc);

			if (weak && !has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_INCREFS, "BR_INCREFS");
			if (!ret && strong && !has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_ACQUIRE, "BR_ACQUIRE");
			if (!ret && !strong && has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_RELEASE, "BR_RELEASE");
			if (!ret && !weak && has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_DECREFS, "BR_DECREFS");
			if (orig_ptr == ptr)
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
			if (ret)
				return ret;
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			cookie = death->cookie;

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)cookie);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				binder_inner_proc_unlock(proc);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;
			struct binder_priority node_prio;

			tr.target.ptr = target_node->ptr;
			tr.cookie = target_node->cookie;
			node_prio.sched_policy = target_node->sched_policy;
			node_prio.prio = target_node->min_priority;
			binder_transaction_priority(current, t, node_prio,
						    target_node->inherit_rt);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = 0;
			tr.cookie = 0;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			tr.sender_pid = task_tgid_nr_ns(sender,
							task_active_pid_ns(current));
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		tr.data.ptr.buffer = (binder_uintptr_t)
			((uintptr_t)t->buffer->data +
			binder_alloc_get_user_buffer_offset(&proc->alloc));
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "put_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr))) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "copy_to_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(tr);

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	binder_inner_proc_lock(proc);
	if (proc->requested_threads == 0 &&
	    list_empty(&thread->proc->waiting_threads) &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
	     /*spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}
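
/*
 * Flush work left on @list when a thread or process dies: pending
 * transactions are failed with BR_DEAD_REPLY, everything else is
 * logged as undelivered and freed.
 */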
static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;

	while (1) {
		w = binder_dequeue_work_head(proc, list);
		if (!w)
			return;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);

			binder_cleanup_transaction(t, "process died.",
						   BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_ERROR: %u\n",
				e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}
}
static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	get_task_struct(current);
	thread->task = current;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}
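
/*
 * Find the binder_thread for current, allocating one on first use.
 * The allocation is done with no locks held; if another thread won
 * the race and inserted first, the spare allocation is freed.
 */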
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}
static void binder_free_proc(struct binder_proc *proc)
{
	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}
static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	put_task_struct(thread->task);
	kfree(thread);
}
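
/*
 * Detach @thread from @proc, unwind its transaction stack, and fail a
 * pending reply if one was owed. Returns the number of transactions
 * that were still active at release time.
 */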
static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
	}

	/*
	 * If this thread used poll, make sure we remove the waitqueue
	 * from any epoll data structures holding it with POLLFREE.
	 * waitqueue_active() is safe to use here because we're holding
	 * the inner lock.
	 */
	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
	    waitqueue_active(&thread->wait)) {
		wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
	}

	binder_inner_proc_unlock(thread->proc);

	/*
	 * This is needed to avoid races between wake_up_poll() above and
	 * ep_remove_waitqueue() called for other reasons (eg the epoll file
	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
	 * lock, so we can be sure it's done after calling synchronize_rcu().
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		synchronize_rcu();

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}
static unsigned int binder_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	bool wait_for_proc_work;

	thread = binder_get_thread(proc);
	if (!thread)
		return POLLERR;

	binder_inner_proc_lock(thread->proc);
	thread->looper |= BINDER_LOOPER_STATE_POLL;
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);

	binder_inner_proc_unlock(thread->proc);

	poll_wait(filp, &thread->wait, wait);

	if (binder_has_work(thread, wait_for_proc_work))
		return POLLIN;

	return 0;
}
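
/*
 * Handle BINDER_WRITE_READ: copy in the binder_write_read block,
 * process the write buffer, then the read buffer, and copy the
 * updated consumed counts back to userspace even on error so the
 * caller can tell how far processing got.
 */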
static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, NULL);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}
static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
				struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;

	memset(info, 0, sizeof(*info));

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
			info->ptr = node->ptr;
			info->cookie = node->cookie;
			info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		int max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_DEBUG_INFO: {
		struct binder_node_debug_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}
static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}
static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
static int binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};
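
/*
 * Map the binder buffer space into the caller. Only the process
 * (group leader) that opened the device may mmap it, the mapping is
 * capped at 4MB, and writable mappings are forbidden.
 */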
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
	if (ret)
		return ret;
	mutex_lock(&proc->files_lock);
	proc->files = get_files_struct(current);
	mutex_unlock(&proc->files_lock);
	return 0;

err_bad_arg:
	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
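
/*
 * Per-open setup: allocate a binder_proc for the opening process,
 * inherit the caller's scheduling policy as the default transaction
 * priority when it is a supported policy, attach the proc to the
 * device context, and create its debugfs entry named after the PID.
 */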
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;
	struct binder_device *binder_dev;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	mutex_init(&proc->files_lock);
	INIT_LIST_HEAD(&proc->todo);
	if (binder_supported_policy(current->policy)) {
		proc->default_priority.sched_policy = current->policy;
		proc->default_priority.prio = current->normal_prio;
	} else {
		proc->default_priority.sched_policy = SCHED_NORMAL;
		proc->default_priority.prio = NICE_TO_PRIO(0);
	}

	binder_dev = container_of(filp->private_data, struct binder_device,
				  miscdev);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	INIT_LIST_HEAD(&proc->waiting_threads);
	filp->private_data = proc;

	mutex_lock(&binder_procs_lock);
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * anyway print all contexts that a given PID has, so this
		 * is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&binder_proc_fops);
	}

	return 0;
}
static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}
static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}
static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}
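
/*
 * Release one node of a dying process: free it immediately if nothing
 * references it, otherwise move it to binder_dead_nodes and queue
 * BR_DEAD_BINDER work for every ref that requested a death
 * notification. Returns the updated incoming reference count.
 */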
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node,
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
		 * with new notification requests and the
		 * inner lock to synchronize with queued
		 * death notifications.
		 */
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;

		BUG_ON(!list_empty(&ref->death->work.entry));
		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);
	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}
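
/*
 * Deferred teardown of a binder_proc: unlink it from the global list,
 * clear the context manager node if this process owned it, release
 * all threads, nodes and refs, and flush any remaining work before
 * dropping the temporary proc reference.
 */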
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	BUG_ON(proc->files);

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			mutex_lock(&proc->files_lock);
			files = proc->files;
			if (files)
				proc->files = NULL;
			mutex_unlock(&proc->files_lock);
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		if (files)
			put_files_struct(files);
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
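
/*
 * Record deferred work bits for @proc and queue it on the global
 * deferred list, scheduling the shared work item if needed.
 */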
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
				&binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}
static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;

	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority.sched_policy,
		   t->priority.prio, t->need_reply);
	spin_unlock(&t->lock);

	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %pK\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->data);
}
static void print_binder_work_ilocked(struct seq_file *m,
				     struct binder_proc *proc,
				     const char *prefix,
				     const char *transaction_prefix,
				     struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}
static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
			thread->pid, thread->looper,
			thread->looper_need_return,
			atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}
static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->sched_policy, node->min_priority,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					  "    pending async transaction", w);
	}
}
static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							    struct binder_ref,
							    rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}
static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}
static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}
static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}
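
/*
 * Driver init: set up the allocator shrinker, the debugfs tree, and
 * one misc device per comma-separated name in binder_devices_param.
 */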
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_names, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;

	binder_alloc_shrinker_init();

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	/*
	 * Copy the module_parameter string, because we don't want to
	 * tokenize it in-place.
	 */
	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}
	strcpy(device_names, binder_devices_param);

	device_tmp = device_names;
	while ((device_name = strsep(&device_tmp, ","))) {
		ret = init_binder_device(device_name);
		if (ret)
			goto err_init_binder_device_failed;
	}

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);
#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");