/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires node->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 */
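
/*
 * Example (illustrative sketch, not code from this driver): a caller
 * that must hold both the node lock and the owning proc's inner lock
 * takes them in the order documented above and releases them in
 * reverse:
 *
 *      binder_node_lock(node);                 2) node->lock
 *      binder_inner_proc_lock(node->proc);     3) proc->inner_lock
 *      ... update node->async_todo ...
 *      binder_inner_proc_unlock(node->proc);
 *      binder_node_unlock(node);
 *
 * The binder_node_inner_lock()/binder_node_inner_unlock() helpers
 * defined below encapsulate exactly this pairing.
 */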
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#include <uapi/linux/android/binder.h>
#include <uapi/linux/sched/types.h>

#include "binder_alloc.h"
#include "binder_trace.h"
static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
        return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
        .owner = THIS_MODULE, \
        .open = binder_##name##_open, \
        .read = seq_read, \
        .llseek = seq_lseek, \
        .release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
enum {
        BINDER_DEBUG_USER_ERROR             = 1U << 0,
        BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
        BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
        BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
        BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
        BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
        BINDER_DEBUG_READ_WRITE             = 1U << 6,
        BINDER_DEBUG_USER_REFS              = 1U << 7,
        BINDER_DEBUG_THREADS                = 1U << 8,
        BINDER_DEBUG_TRANSACTION            = 1U << 9,
        BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
        BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
        BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
        BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
        BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
        BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
                                         const struct kernel_param *kp)
{
        int ret;

        ret = param_set_int(val, kp);
        if (binder_stop_on_user_error < 2)
                wake_up(&binder_user_error_wait);

        return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
                  param_get_int, &binder_stop_on_user_error, 0644);
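
/*
 * Usage sketch (assuming the driver is built in and sysfs is mounted):
 * both writable parameters can be tuned at runtime, e.g.
 *
 *      echo 0x3f > /sys/module/binder/parameters/debug_mask
 *      echo 1 > /sys/module/binder/parameters/stop_on_user_error
 */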
#define binder_debug(mask, x...) \
        do { \
                if (binder_debug_mask & mask) \
                        pr_info(x); \
        } while (0)

#define binder_user_error(x...) \
        do { \
                if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
                        pr_info(x); \
                if (binder_stop_on_user_error) \
                        binder_stop_on_user_error = 2; \
        } while (0)

#define to_flat_binder_object(hdr) \
        container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
        container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
        container_of(hdr, struct binder_fd_array_object, hdr)
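
/*
 * Example (illustrative sketch): given a validated object header in a
 * transaction buffer, these accessors recover the enclosing object.
 * Since @hdr is the first member of each object struct, container_of()
 * only subtracts the member offset:
 *
 *      struct binder_object_header *hdr = ...;
 *
 *      if (hdr->type == BINDER_TYPE_BINDER) {
 *              struct flat_binder_object *fp = to_flat_binder_object(hdr);
 *              ... use fp->binder, fp->cookie ...
 *      }
 */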
enum binder_stat_types {
        BINDER_STAT_PROC,
        BINDER_STAT_THREAD,
        BINDER_STAT_NODE,
        BINDER_STAT_REF,
        BINDER_STAT_DEATH,
        BINDER_STAT_TRANSACTION,
        BINDER_STAT_TRANSACTION_COMPLETE,
        BINDER_STAT_COUNT
};

struct binder_stats {
        atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
        atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
        atomic_t obj_created[BINDER_STAT_COUNT];
        atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
        int debug_id;
        int debug_id_done;
        int call_type;
        int from_proc;
        int from_thread;
        int target_handle;
        int to_proc;
        int to_thread;
        int to_node;
        int data_size;
        int offsets_size;
        int return_error_line;
        uint32_t return_error;
        uint32_t return_error_param;
        const char *context_name;
};
struct binder_transaction_log {
        atomic_t cur;
        bool full;
        struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
        struct binder_transaction_log *log)
{
        struct binder_transaction_log_entry *e;
        unsigned int cur = atomic_inc_return(&log->cur);

        if (cur >= ARRAY_SIZE(log->entry))
                log->full = true;
        e = &log->entry[cur % ARRAY_SIZE(log->entry)];
        WRITE_ONCE(e->debug_id_done, 0);
        /*
         * write-barrier to synchronize access to e->debug_id_done.
         * We make sure the initialized 0 value is seen before
         * the other fields are zeroed by memset().
         */
        smp_wmb();
        memset(e, 0, sizeof(*e));
        return e;
}
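
/*
 * Sketch of the matching read side: a consumer of the log pairs the
 * smp_wmb() above with a read barrier, sampling @debug_id_done before
 * and after reading the entry so a concurrent overwrite can be
 * detected:
 *
 *      int debug_id = READ_ONCE(e->debug_id_done);
 *      smp_rmb();
 *      ... print the entry fields ...
 *      if (debug_id != READ_ONCE(e->debug_id_done))
 *              ... entry was reused mid-read, mark it incomplete ...
 */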
struct binder_context {
        struct binder_node *binder_context_mgr_node;
        struct mutex context_mgr_node_lock;

        kuid_t binder_context_mgr_uid;
        const char *name;
};

struct binder_device {
        struct hlist_node hlist;
        struct miscdevice miscdev;
        struct binder_context context;
};

/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
        struct list_head entry;

        enum {
                BINDER_WORK_TRANSACTION = 1,
                BINDER_WORK_TRANSACTION_COMPLETE,
                BINDER_WORK_RETURN_ERROR,
                BINDER_WORK_NODE,
                BINDER_WORK_DEAD_BINDER,
                BINDER_WORK_DEAD_BINDER_AND_CLEAR,
                BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
        } type;
};

struct binder_error {
        struct binder_work work;
        uint32_t cmd;
};
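
/*
 * Example (illustrative sketch): work items are polymorphic. Consumers
 * dequeue a struct binder_work and use @type with container_of() to
 * recover the enclosing object:
 *
 *      switch (w->type) {
 *      case BINDER_WORK_RETURN_ERROR: {
 *              struct binder_error *e =
 *                      container_of(w, struct binder_error, work);
 *
 *              ... report e->cmd to userspace ...
 *              break;
 *      }
 *      ...
 *      }
 */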
/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @sched_policy:         minimum scheduling policy for node
 *                        (invariant after initialized)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @inherit_rt:           inherit RT scheduling policy from caller
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
        int debug_id;
        spinlock_t lock;
        struct binder_work work;
        union {
                struct rb_node rb_node;
                struct hlist_node dead_node;
        };
        struct binder_proc *proc;
        struct hlist_head refs;
        int internal_strong_refs;
        int local_weak_refs;
        int local_strong_refs;
        int tmp_refs;
        binder_uintptr_t ptr;
        binder_uintptr_t cookie;
        struct {
                /*
                 * bitfield elements protected by
                 * proc inner_lock
                 */
                u8 has_strong_ref:1;
                u8 pending_strong_ref:1;
                u8 has_weak_ref:1;
                u8 pending_weak_ref:1;
        };
        struct {
                /*
                 * invariant after initialization
                 */
                u8 sched_policy:2;
                u8 inherit_rt:1;
                u8 accept_fds:1;
                u8 min_priority;
        };
        bool has_async_transaction;
        struct list_head async_todo;
};

struct binder_ref_death {
        /**
         * @work: worklist element for death notifications
         *        (protected by inner_lock of the proc that
         *        this ref belongs to)
         */
        struct binder_work work;
        binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
        int debug_id;
        uint32_t desc;
        int strong;
        int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:         binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:   list entry for node->refs list in target node
 *                (protected by @node->lock)
 * @proc:         binder_proc containing ref
 * @node:         binder_node of target node. When cleaning up a
 *                ref for deletion in binder_cleanup_ref, a non-NULL
 *                @node indicates the node must be freed
 * @death:        pointer to death notification (ref_death) if requested
 *                (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
        /* Lookups needed: */
        /*   node + proc => ref (transaction) */
        /*   desc + proc => ref (transaction, inc/dec ref) */
        /*   node => refs + procs (proc exit) */
        struct binder_ref_data data;
        struct rb_node rb_node_desc;
        struct rb_node rb_node_node;
        struct hlist_node node_entry;
        struct binder_proc *proc;
        struct binder_node *node;
        struct binder_ref_death *death;
};

enum binder_deferred_state {
        BINDER_DEFERRED_PUT_FILES    = 0x01,
        BINDER_DEFERRED_FLUSH        = 0x02,
        BINDER_DEFERRED_RELEASE      = 0x04,
};
/**
 * struct binder_priority - scheduler policy and priority
 * @sched_policy: scheduler policy
 * @prio:         [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
 *
 * The binder driver supports inheriting the following scheduler policies:
 * SCHED_NORMAL
 * SCHED_BATCH
 * SCHED_FIFO
 * SCHED_RR
 */
struct binder_priority {
        unsigned int sched_policy;
        int prio;
};
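
/*
 * Worked example (illustrative): @prio is on the unified kernel scale
 * where lower values mean higher priority. For SCHED_NORMAL, nice 0
 * maps to prio 120 (NICE_TO_PRIO(0)) and nice 10 to prio 130. For
 * SCHED_FIFO/SCHED_RR, a user-space rtprio of 50 maps to
 * prio MAX_USER_RT_PRIO - 1 - 50 = 49.
 */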
/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (protected by @files_lock)
 * @files_lock:           mutex to protect @files
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
        struct hlist_node proc_node;
        struct rb_root threads;
        struct rb_root nodes;
        struct rb_root refs_by_desc;
        struct rb_root refs_by_node;
        struct list_head waiting_threads;
        int pid;
        struct task_struct *tsk;
        struct files_struct *files;
        struct mutex files_lock;
        struct hlist_node deferred_work_node;
        int deferred_work;
        bool is_dead;

        struct list_head todo;
        struct binder_stats stats;
        struct list_head delivered_death;
        int max_threads;
        int requested_threads;
        int requested_threads_started;
        int tmp_ref;
        struct binder_priority default_priority;
        struct dentry *debugfs_entry;
        struct binder_alloc alloc;
        struct binder_context *context;
        spinlock_t inner_lock;
        spinlock_t outer_lock;
};

enum {
        BINDER_LOOPER_STATE_REGISTERED  = 0x01,
        BINDER_LOOPER_STATE_ENTERED     = 0x02,
        BINDER_LOOPER_STATE_EXITED      = 0x04,
        BINDER_LOOPER_STATE_INVALID     = 0x08,
        BINDER_LOOPER_STATE_WAITING     = 0x10,
        BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 * @task:                 struct task_struct for this thread
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
        struct binder_proc *proc;
        struct rb_node rb_node;
        struct list_head waiting_thread_node;
        int pid;
        int looper;              /* only modified by this thread */
        bool looper_need_return; /* can be written by other thread */
        struct binder_transaction *transaction_stack;
        struct list_head todo;
        bool process_todo;
        struct binder_error return_error;
        struct binder_error reply_error;
        wait_queue_head_t wait;
        struct binder_stats stats;
        atomic_t tmp_ref;
        bool is_dead;
        struct task_struct *task;
};

struct binder_transaction {
        int debug_id;
        struct binder_work work;
        struct binder_thread *from;
        struct binder_transaction *from_parent;
        struct binder_proc *to_proc;
        struct binder_thread *to_thread;
        struct binder_transaction *to_parent;
        unsigned need_reply:1;
        /* unsigned is_dead:1; */ /* not used at the moment */

        struct binder_buffer *buffer;
        unsigned int code;
        unsigned int flags;
        struct binder_priority priority;
        struct binder_priority saved_priority;
        bool set_priority_called;
        kuid_t sender_euid;
        /**
         * @lock:  protects @from, @to_proc, and @to_thread
         *
         * @from, @to_proc, and @to_thread can be set to NULL
         * during thread teardown
         */
        spinlock_t lock;
};
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release spinlock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
        if (node->proc)
                binder_inner_proc_lock(node->proc);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
        struct binder_proc *proc = node->proc;

        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        if (proc)
                binder_inner_proc_unlock(proc);
        spin_unlock(&node->lock);
}
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
        return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
                                  struct list_head *list)
{
        bool ret;

        binder_inner_proc_lock(proc);
        ret = binder_worklist_empty_ilocked(list);
        binder_inner_proc_unlock(proc);
        return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
                            struct list_head *target_list)
{
        BUG_ON(target_list == NULL);
        BUG_ON(work->entry.next && !list_empty(&work->entry));
        list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
                                            struct binder_work *work)
{
        binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
                                   struct binder_work *work)
{
        binder_enqueue_work_ilocked(work, &thread->todo);
        thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
                           struct binder_work *work)
{
        binder_inner_proc_lock(thread->proc);
        binder_enqueue_thread_work_ilocked(thread, work);
        binder_inner_proc_unlock(thread->proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
        list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
        binder_inner_proc_lock(proc);
        binder_dequeue_work_ilocked(work);
        binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
                                        struct list_head *list)
{
        struct binder_work *w;

        w = list_first_entry_or_null(list, struct binder_work, entry);
        if (w)
                list_del_init(&w->entry);
        return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
                                        struct binder_proc *proc,
                                        struct list_head *list)
{
        struct binder_work *w;

        binder_inner_proc_lock(proc);
        w = binder_dequeue_work_head_ilocked(list);
        binder_inner_proc_unlock(proc);
        return w;
}
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
        unsigned long rlim_cur;
        unsigned long irqs;
        int ret;

        mutex_lock(&proc->files_lock);
        if (proc->files == NULL) {
                ret = -ESRCH;
                goto err;
        }
        if (!lock_task_sighand(proc->tsk, &irqs)) {
                ret = -EMFILE;
                goto err;
        }
        rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
        unlock_task_sighand(proc->tsk, &irqs);

        ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
err:
        mutex_unlock(&proc->files_lock);
        return ret;
}

/*
 * copied from fd_install
 */
static void task_fd_install(
        struct binder_proc *proc, unsigned int fd, struct file *file)
{
        mutex_lock(&proc->files_lock);
        if (proc->files)
                __fd_install(proc->files, fd, file);
        mutex_unlock(&proc->files_lock);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
        int retval;

        mutex_lock(&proc->files_lock);
        if (proc->files == NULL) {
                retval = -ESRCH;
                goto err;
        }
        retval = __close_fd(proc->files, fd);
        /* can't restart close syscall because file table entry was cleared */
        if (unlikely(retval == -ERESTARTSYS ||
                     retval == -ERESTARTNOINTR ||
                     retval == -ERESTARTNOHAND ||
                     retval == -ERESTART_RESTARTBLOCK))
                retval = -EINTR;
err:
        mutex_unlock(&proc->files_lock);
        return retval;
}
static bool binder_has_work_ilocked(struct binder_thread *thread,
                                    bool do_proc_work)
{
        return thread->process_todo ||
                thread->looper_need_return ||
                (do_proc_work &&
                 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
        bool has_work;

        binder_inner_proc_lock(thread->proc);
        has_work = binder_has_work_ilocked(thread, do_proc_work);
        binder_inner_proc_unlock(thread->proc);

        return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
        return !thread->transaction_stack &&
                binder_worklist_empty_ilocked(&thread->todo) &&
                (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
                                   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
                                               bool sync)
{
        struct rb_node *n;
        struct binder_thread *thread;

        for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
                thread = rb_entry(n, struct binder_thread, rb_node);
                if (thread->looper & BINDER_LOOPER_STATE_POLL &&
                    binder_available_for_proc_work_ilocked(thread)) {
                        if (sync)
                                wake_up_interruptible_sync(&thread->wait);
                        else
                                wake_up_interruptible(&thread->wait);
                }
        }
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:       process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:      If there's a thread currently waiting for process work,
 *              returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
        struct binder_thread *thread;

        assert_spin_locked(&proc->inner_lock);
        thread = list_first_entry_or_null(&proc->waiting_threads,
                                          struct binder_thread,
                                          waiting_thread_node);

        if (thread)
                list_del_init(&thread->waiting_thread_node);

        return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:       process to wake up a thread in
 * @thread:     specific thread to wake-up (may be NULL)
 * @sync:       whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
                                         struct binder_thread *thread,
                                         bool sync)
{
        assert_spin_locked(&proc->inner_lock);

        if (thread) {
                if (sync)
                        wake_up_interruptible_sync(&thread->wait);
                else
                        wake_up_interruptible(&thread->wait);
                return;
        }

        /* Didn't find a thread waiting for proc work; this can happen
         * in two scenarios:
         * 1. All threads are busy handling transactions
         *    In that case, one of those threads should call back into
         *    the kernel driver soon and pick up this work.
         * 2. Threads are using the (e)poll interface, in which case
         *    they may be blocked on the waitqueue without having been
         *    added to waiting_threads. For this case, we just iterate
         *    over all threads not handling transaction work, and
         *    wake them all up. We wake all because we don't know whether
         *    a thread that called into (e)poll is handling non-binder
         *    work currently.
         */
        binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
        struct binder_thread *thread = binder_select_thread_ilocked(proc);

        binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static bool is_rt_policy(int policy)
{
        return policy == SCHED_FIFO || policy == SCHED_RR;
}

static bool is_fair_policy(int policy)
{
        return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static bool binder_supported_policy(int policy)
{
        return is_fair_policy(policy) || is_rt_policy(policy);
}

static int to_userspace_prio(int policy, int kernel_priority)
{
        if (is_fair_policy(policy))
                return PRIO_TO_NICE(kernel_priority);
        else
                return MAX_USER_RT_PRIO - 1 - kernel_priority;
}

static int to_kernel_prio(int policy, int user_priority)
{
        if (is_fair_policy(policy))
                return NICE_TO_PRIO(user_priority);
        else
                return MAX_USER_RT_PRIO - 1 - user_priority;
}
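
/*
 * Worked example (illustrative): for a fixed policy the two helpers
 * are inverses. to_userspace_prio(SCHED_NORMAL, 130) yields nice 10
 * and to_kernel_prio(SCHED_NORMAL, 10) yields 130;
 * to_userspace_prio(SCHED_FIFO, 49) yields rtprio 50 and
 * to_kernel_prio(SCHED_FIFO, 50) yields 49.
 */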
static void binder_do_set_priority(struct task_struct *task,
                                   struct binder_priority desired,
                                   bool verify)
{
        int priority; /* user-space prio value */
        bool has_cap_nice;
        unsigned int policy = desired.sched_policy;

        if (task->policy == policy && task->normal_prio == desired.prio)
                return;

        has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);

        priority = to_userspace_prio(policy, desired.prio);

        if (verify && is_rt_policy(policy) && !has_cap_nice) {
                long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);

                if (max_rtprio == 0) {
                        policy = SCHED_NORMAL;
                        priority = MIN_NICE;
                } else if (priority > max_rtprio) {
                        priority = max_rtprio;
                }
        }

        if (verify && is_fair_policy(policy) && !has_cap_nice) {
                long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));

                if (min_nice > MAX_NICE) {
                        binder_user_error("%d RLIMIT_NICE not set\n",
                                          task->pid);
                        return;
                } else if (priority < min_nice) {
                        priority = min_nice;
                }
        }

        if (policy != desired.sched_policy ||
            to_kernel_prio(policy, priority) != desired.prio)
                binder_debug(BINDER_DEBUG_PRIORITY_CAP,
                             "%d: priority %d not allowed, using %d instead\n",
                             task->pid, desired.prio,
                             to_kernel_prio(policy, priority));

        trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
                                  to_kernel_prio(policy, priority),
                                  desired.prio);

        /* Set the actual priority */
        if (task->policy != policy || is_rt_policy(policy)) {
                struct sched_param params;

                params.sched_priority = is_rt_policy(policy) ? priority : 0;

                sched_setscheduler_nocheck(task,
                                           policy | SCHED_RESET_ON_FORK,
                                           &params);
        }
        if (is_fair_policy(policy))
                set_user_nice(task, priority);
}

static void binder_set_priority(struct task_struct *task,
                                struct binder_priority desired)
{
        binder_do_set_priority(task, desired, /* verify = */ true);
}

static void binder_restore_priority(struct task_struct *task,
                                    struct binder_priority desired)
{
        binder_do_set_priority(task, desired, /* verify = */ false);
}

static void binder_transaction_priority(struct task_struct *task,
                                        struct binder_transaction *t,
                                        struct binder_priority node_prio,
                                        bool inherit_rt)
{
        struct binder_priority desired_prio = t->priority;

        if (t->set_priority_called)
                return;

        t->set_priority_called = true;
        t->saved_priority.sched_policy = task->policy;
        t->saved_priority.prio = task->normal_prio;

        if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
                desired_prio.prio = NICE_TO_PRIO(0);
                desired_prio.sched_policy = SCHED_NORMAL;
        }

        if (node_prio.prio < t->priority.prio ||
            (node_prio.prio == t->priority.prio &&
             node_prio.sched_policy == SCHED_FIFO)) {
                /*
                 * In case the minimum priority on the node is
                 * higher (lower value), use that priority. If
                 * the priority is the same, but the node uses
                 * SCHED_FIFO, prefer SCHED_FIFO, since it can
                 * run unbounded, unlike SCHED_RR.
                 */
                desired_prio = node_prio;
        }

        binder_set_priority(task, desired_prio);
}
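
/*
 * Example (illustrative): a caller running SCHED_NORMAL at prio 130
 * (nice 10) sends a transaction to a node whose min_priority is 120
 * (nice 0). Since 120 is the higher priority, the target thread runs
 * the transaction at prio 120; its previous priority was saved in
 * t->saved_priority above and is restored when the reply is sent.
 */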
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
                                                   binder_uintptr_t ptr)
{
        struct rb_node *n = proc->nodes.rb_node;
        struct binder_node *node;

        assert_spin_locked(&proc->inner_lock);

        while (n) {
                node = rb_entry(n, struct binder_node, rb_node);

                if (ptr < node->ptr)
                        n = n->rb_left;
                else if (ptr > node->ptr)
                        n = n->rb_right;
                else {
                        /*
                         * take an implicit weak reference
                         * to ensure node stays alive until
                         * call to binder_put_node()
                         */
                        binder_inc_node_tmpref_ilocked(node);
                        return node;
                }
        }
        return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
                                           binder_uintptr_t ptr)
{
        struct binder_node *node;

        binder_inner_proc_lock(proc);
        node = binder_get_node_ilocked(proc, ptr);
        binder_inner_proc_unlock(proc);
        return node;
}

static struct binder_node *binder_init_node_ilocked(
                                                struct binder_proc *proc,
                                                struct binder_node *new_node,
                                                struct flat_binder_object *fp)
{
        struct rb_node **p = &proc->nodes.rb_node;
        struct rb_node *parent = NULL;
        struct binder_node *node;
        binder_uintptr_t ptr = fp ? fp->binder : 0;
        binder_uintptr_t cookie = fp ? fp->cookie : 0;
        __u32 flags = fp ? fp->flags : 0;
        s8 priority;

        assert_spin_locked(&proc->inner_lock);

        while (*p) {
                parent = *p;
                node = rb_entry(parent, struct binder_node, rb_node);

                if (ptr < node->ptr)
                        p = &(*p)->rb_left;
                else if (ptr > node->ptr)
                        p = &(*p)->rb_right;
                else {
                        /*
                         * A matching node is already in
                         * the rb tree. Abandon the init
                         * and return it.
                         */
                        binder_inc_node_tmpref_ilocked(node);
                        return node;
                }
        }
        node = new_node;
        binder_stats_created(BINDER_STAT_NODE);
        node->tmp_refs++;
        rb_link_node(&node->rb_node, parent, p);
        rb_insert_color(&node->rb_node, &proc->nodes);
        node->debug_id = atomic_inc_return(&binder_last_id);
        node->proc = proc;
        node->ptr = ptr;
        node->cookie = cookie;
        node->work.type = BINDER_WORK_NODE;
        priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
        node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
                FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
        node->min_priority = to_kernel_prio(node->sched_policy, priority);
        node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
        node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
        spin_lock_init(&node->lock);
        INIT_LIST_HEAD(&node->work.entry);
        INIT_LIST_HEAD(&node->async_todo);
        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                     "%d:%d node %d u%016llx c%016llx created\n",
                     proc->pid, current->pid, node->debug_id,
                     (u64)node->ptr, (u64)node->cookie);

        return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
                                           struct flat_binder_object *fp)
{
        struct binder_node *node;
        struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

        if (!new_node)
                return NULL;
        binder_inner_proc_lock(proc);
        node = binder_init_node_ilocked(proc, new_node, fp);
        binder_inner_proc_unlock(proc);
        if (node != new_node)
                /*
                 * The node was already added by another thread
                 */
                kfree(new_node);

        return node;
}

static void binder_free_node(struct binder_node *node)
{
        kfree(node);
        binder_stats_deleted(BINDER_STAT_NODE);
}
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
                                    int internal,
                                    struct list_head *target_list)
{
        struct binder_proc *proc = node->proc;

        assert_spin_locked(&node->lock);
        if (proc)
                assert_spin_locked(&proc->inner_lock);
        if (strong) {
                if (internal) {
                        if (target_list == NULL &&
                            node->internal_strong_refs == 0 &&
                            !(node->proc &&
                              node == node->proc->context->binder_context_mgr_node &&
                              node->has_strong_ref)) {
                                pr_err("invalid inc strong node for %d\n",
                                       node->debug_id);
                                return -EINVAL;
                        }
                        node->internal_strong_refs++;
                } else
                        node->local_strong_refs++;
                if (!node->has_strong_ref && target_list) {
                        binder_dequeue_work_ilocked(&node->work);
                        /*
                         * Note: this function is the only place where we queue
                         * directly to a thread->todo without using the
                         * corresponding binder_enqueue_thread_work() helper
                         * functions; in this case it's ok to not set the
                         * process_todo flag, since we know this node work will
                         * always be followed by other work that starts queue
                         * processing: in case of synchronous transactions, a
                         * BR_REPLY or BR_ERROR; in case of oneway
                         * transactions, a BR_TRANSACTION_COMPLETE.
                         */
                        binder_enqueue_work_ilocked(&node->work, target_list);
                }
        } else {
                if (!internal)
                        node->local_weak_refs++;
                if (!node->has_weak_ref && list_empty(&node->work.entry)) {
                        if (target_list == NULL) {
                                pr_err("invalid inc weak node for %d\n",
                                       node->debug_id);
                                return -EINVAL;
                        }
                        /*
                         * See comment above
                         */
                        binder_enqueue_work_ilocked(&node->work, target_list);
                }
        }
        return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
                           struct list_head *target_list)
{
        int ret;

        binder_node_inner_lock(node);
        ret = binder_inc_node_nilocked(node, strong, internal, target_list);
        binder_node_inner_unlock(node);

        return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
                                     int strong, int internal)
{
        struct binder_proc *proc = node->proc;

        assert_spin_locked(&node->lock);
        if (proc)
                assert_spin_locked(&proc->inner_lock);
        if (strong) {
                if (internal)
                        node->internal_strong_refs--;
                else
                        node->local_strong_refs--;
                if (node->local_strong_refs || node->internal_strong_refs)
                        return false;
        } else {
                if (!internal)
                        node->local_weak_refs--;
                if (node->local_weak_refs || node->tmp_refs ||
                    !hlist_empty(&node->refs))
                        return false;
        }

        if (proc && (node->has_strong_ref || node->has_weak_ref)) {
                if (list_empty(&node->work.entry)) {
                        binder_enqueue_work_ilocked(&node->work, &proc->todo);
                        binder_wakeup_proc_ilocked(proc);
                }
        } else {
                if (hlist_empty(&node->refs) && !node->local_strong_refs &&
                    !node->local_weak_refs && !node->tmp_refs) {
                        if (proc) {
                                binder_dequeue_work_ilocked(&node->work);
                                rb_erase(&node->rb_node, &proc->nodes);
                                binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                                             "refless node %d deleted\n",
                                             node->debug_id);
                        } else {
                                BUG_ON(!list_empty(&node->work.entry));
                                spin_lock(&binder_dead_nodes_lock);
                                /*
                                 * tmp_refs could have changed so
                                 * check it again
                                 */
                                if (node->tmp_refs) {
                                        spin_unlock(&binder_dead_nodes_lock);
                                        return false;
                                }
                                hlist_del(&node->dead_node);
                                spin_unlock(&binder_dead_nodes_lock);
                                binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                                             "dead node %d deleted\n",
                                             node->debug_id);
                        }
                        return true;
                }
        }
        return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
        bool free_node;

        binder_node_inner_lock(node);
        free_node = binder_dec_node_nilocked(node, strong, internal);
        binder_node_inner_unlock(node);
        if (free_node)
                binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
        /*
         * No call to binder_inc_node() is needed since we
         * don't need to inform userspace of any changes to
         * tmp_refs
         */
        node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:       node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
        binder_node_lock(node);
        if (node->proc)
                binder_inner_proc_lock(node->proc);
        else
                spin_lock(&binder_dead_nodes_lock);
        binder_inc_node_tmpref_ilocked(node);
        if (node->proc)
                binder_inner_proc_unlock(node->proc);
        else
                spin_unlock(&binder_dead_nodes_lock);
        binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:       node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
        bool free_node;

        binder_node_inner_lock(node);
        if (!node->proc)
                spin_lock(&binder_dead_nodes_lock);
        node->tmp_refs--;
        BUG_ON(node->tmp_refs < 0);
        if (!node->proc)
                spin_unlock(&binder_dead_nodes_lock);
        /*
         * Call binder_dec_node() to check if all refcounts are 0
         * and cleanup is needed. Calling with strong=0 and internal=1
         * causes no actual reference to be released in binder_dec_node().
         * If that changes, a change is needed here too.
         */
        free_node = binder_dec_node_nilocked(node, 0, 1);
        binder_node_inner_unlock(node);
        if (free_node)
                binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
        binder_dec_node_tmpref(node);
}
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
                                                 u32 desc, bool need_strong_ref)
{
        struct rb_node *n = proc->refs_by_desc.rb_node;
        struct binder_ref *ref;

        while (n) {
                ref = rb_entry(n, struct binder_ref, rb_node_desc);

                if (desc < ref->data.desc) {
                        n = n->rb_left;
                } else if (desc > ref->data.desc) {
                        n = n->rb_right;
                } else if (need_strong_ref && !ref->data.strong) {
                        binder_user_error("tried to use weak ref as strong ref\n");
                        return NULL;
                } else {
                        return ref;
                }
        }
        return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:       binder_proc that owns the ref
 * @node:       binder_node of target
 * @new_ref:    newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:      the ref for node. It is possible that another thread
 *              allocated/initialized the ref first in which case the
 *              returned ref would be different than the passed-in
 *              new_ref. new_ref must be kfree'd by the caller in
 *              this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
                                        struct binder_proc *proc,
                                        struct binder_node *node,
                                        struct binder_ref *new_ref)
{
        struct binder_context *context = proc->context;
        struct rb_node **p = &proc->refs_by_node.rb_node;
        struct rb_node *parent = NULL;
        struct binder_ref *ref;
        struct rb_node *n;

        while (*p) {
                parent = *p;
                ref = rb_entry(parent, struct binder_ref, rb_node_node);

                if (node < ref->node)
                        p = &(*p)->rb_left;
                else if (node > ref->node)
                        p = &(*p)->rb_right;
                else
                        return ref;
        }
        if (!new_ref)
                return NULL;

        binder_stats_created(BINDER_STAT_REF);
        new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
        new_ref->proc = proc;
        new_ref->node = node;
        rb_link_node(&new_ref->rb_node_node, parent, p);
        rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

        new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
        for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
                ref = rb_entry(n, struct binder_ref, rb_node_desc);
                if (ref->data.desc > new_ref->data.desc)
                        break;
                new_ref->data.desc = ref->data.desc + 1;
        }

        p = &proc->refs_by_desc.rb_node;
        while (*p) {
                parent = *p;
                ref = rb_entry(parent, struct binder_ref, rb_node_desc);

                if (new_ref->data.desc < ref->data.desc)
                        p = &(*p)->rb_left;
                else if (new_ref->data.desc > ref->data.desc)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }
        rb_link_node(&new_ref->rb_node_desc, parent, p);
        rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

        binder_node_lock(node);
        hlist_add_head(&new_ref->node_entry, &node->refs);

        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                     "%d new ref %d desc %d for node %d\n",
                     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
                     node->debug_id);
        binder_node_unlock(node);
        return new_ref;
}
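
/*
 * Worked example (illustrative): the descriptor scan above allocates
 * the lowest unused handle. If the proc already holds refs with descs
 * {0, 1, 2, 5}, the loop advances new_ref->data.desc to 3 and stops at
 * 5 (5 > 3), so the new ref gets desc 3. Desc 0 is reserved for refs
 * to the context manager node.
 */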
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
        bool delete_node = false;

        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                     "%d delete ref %d desc %d for node %d\n",
                     ref->proc->pid, ref->data.debug_id, ref->data.desc,
                     ref->node->debug_id);

        rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
        rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

        binder_node_inner_lock(ref->node);
        if (ref->data.strong)
                binder_dec_node_nilocked(ref->node, 1, 1);

        hlist_del(&ref->node_entry);
        delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
        binder_node_inner_unlock(ref->node);
        /*
         * Clear ref->node unless we want the caller to free the node
         */
        if (!delete_node) {
                /*
                 * The caller uses ref->node to determine
                 * whether the node needs to be freed. Clear
                 * it since the node is still alive.
                 */
                ref->node = NULL;
        }

        if (ref->death) {
                binder_debug(BINDER_DEBUG_DEAD_BINDER,
                             "%d delete ref %d desc %d has death notification\n",
                             ref->proc->pid, ref->data.debug_id,
                             ref->data.desc);
                binder_dequeue_work(ref->proc, &ref->death->work);
                binder_stats_deleted(BINDER_STAT_DEATH);
        }
        binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
                                  struct list_head *target_list)
{
        int ret;

        if (strong) {
                if (ref->data.strong == 0) {
                        ret = binder_inc_node(ref->node, 1, 1, target_list);
                        if (ret)
                                return ret;
                }
                ref->data.strong++;
        } else {
                if (ref->data.weak == 0) {
                        ret = binder_inc_node(ref->node, 0, 1, target_list);
                        if (ret)
                                return ret;
                }
                ref->data.weak++;
        }
        return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:        ref to be decremented
 * @strong:     if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
        if (strong) {
                if (ref->data.strong == 0) {
                        binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
                                          ref->proc->pid, ref->data.debug_id,
                                          ref->data.desc, ref->data.strong,
                                          ref->data.weak);
                        return false;
                }
                ref->data.strong--;
                if (ref->data.strong == 0)
                        binder_dec_node(ref->node, strong, 1);
        } else {
                if (ref->data.weak == 0) {
                        binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
                                          ref->proc->pid, ref->data.debug_id,
                                          ref->data.desc, ref->data.strong,
                                          ref->data.weak);
                        return false;
                }
                ref->data.weak--;
        }
        if (ref->data.strong == 0 && ref->data.weak == 0) {
                binder_cleanup_ref_olocked(ref);
                return true;
        }
        return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:       proc containing the ref
 * @desc:       the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:      the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
                struct binder_proc *proc,
                u32 desc, bool need_strong_ref,
                struct binder_ref_data *rdata)
{
        struct binder_node *node;
        struct binder_ref *ref;

        binder_proc_lock(proc);
        ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
        if (!ref)
                goto err_no_ref;
        node = ref->node;
        /*
         * Take an implicit reference on the node to ensure
         * it stays alive until the call to binder_put_node()
         */
        binder_inc_node_tmpref(node);
        if (rdata)
                *rdata = ref->data;
        binder_proc_unlock(proc);

        return node;

err_no_ref:
        binder_proc_unlock(proc);
        return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:        ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
        if (ref->node)
                binder_free_node(ref->node);
        kfree(ref->death);
        kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:       proc containing the ref
 * @desc:       the handle associated with the ref
 * @increment:  true=inc reference, false=dec reference
 * @strong:     true=strong reference, false=weak reference
 * @rdata:      the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
                uint32_t desc, bool increment, bool strong,
                struct binder_ref_data *rdata)
{
        int ret = 0;
        struct binder_ref *ref;
        bool delete_ref = false;

        binder_proc_lock(proc);
        ref = binder_get_ref_olocked(proc, desc, strong);
        if (!ref) {
                ret = -EINVAL;
                goto err_no_ref;
        }
        if (increment)
                ret = binder_inc_ref_olocked(ref, strong, NULL);
        else
                delete_ref = binder_dec_ref_olocked(ref, strong);

        if (rdata)
                *rdata = ref->data;
        binder_proc_unlock(proc);

        if (delete_ref)
                binder_free_ref(ref);
        return ret;

err_no_ref:
        binder_proc_unlock(proc);
        return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:       proc containing the ref
 * @desc:       the handle associated with the ref
 * @strong:     true=strong reference, false=weak reference
 * @rdata:      the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
                uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
        return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
1951 * binder_inc_ref_for_node() - increment the ref for given proc/node
1952 * @proc: proc containing the ref
1953 * @node: target node
1954 * @strong: true=strong reference, false=weak reference
1955 * @target_list: worklist to use if node is incremented
1956 * @rdata: the id/refcount data for the ref
1958 * Given a proc and node, increment the ref. Create the ref if it
1959 * doesn't already exist
1961 * Return: 0 if successful, else errno
1963 static int binder_inc_ref_for_node(struct binder_proc
*proc
,
1964 struct binder_node
*node
,
1966 struct list_head
*target_list
,
1967 struct binder_ref_data
*rdata
)
1969 struct binder_ref
*ref
;
1970 struct binder_ref
*new_ref
= NULL
;
1973 binder_proc_lock(proc
);
1974 ref
= binder_get_ref_for_node_olocked(proc
, node
, NULL
);
1976 binder_proc_unlock(proc
);
1977 new_ref
= kzalloc(sizeof(*ref
), GFP_KERNEL
);
1980 binder_proc_lock(proc
);
1981 ref
= binder_get_ref_for_node_olocked(proc
, node
, new_ref
);
1983 ret
= binder_inc_ref_olocked(ref
, strong
, target_list
);
1985 binder_proc_unlock(proc
);
1986 if (new_ref
&& ref
!= new_ref
)
1988 * Another thread created the ref first so
1989 * free the one we allocated
1995 static void binder_pop_transaction_ilocked(struct binder_thread
*target_thread
,
1996 struct binder_transaction
*t
)
1998 BUG_ON(!target_thread
);
1999 assert_spin_locked(&target_thread
->proc
->inner_lock
);
2000 BUG_ON(target_thread
->transaction_stack
!= t
);
2001 BUG_ON(target_thread
->transaction_stack
->from
!= target_thread
);
2002 target_thread
->transaction_stack
=
2003 target_thread
->transaction_stack
->from_parent
;
2008 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
2009 * @thread: thread to decrement
2011 * A thread needs to be kept alive while being used to create or
2012 * handle a transaction. binder_get_txn_from() is used to safely
2013 * extract t->from from a binder_transaction and keep the thread
2014 * indicated by t->from from being freed. When done with that
2015 * binder_thread, this function is called to decrement the
2016 * tmp_ref and free if appropriate (thread has been released
2017 * and no transaction being processed by the driver)
2019 static void binder_thread_dec_tmpref(struct binder_thread
*thread
)
2022 * atomic is used to protect the counter value while
2023 * it cannot reach zero or thread->is_dead is false
2025 binder_inner_proc_lock(thread
->proc
);
2026 atomic_dec(&thread
->tmp_ref
);
2027 if (thread
->is_dead
&& !atomic_read(&thread
->tmp_ref
)) {
2028 binder_inner_proc_unlock(thread
->proc
);
2029 binder_free_thread(thread
);
2032 binder_inner_proc_unlock(thread
->proc
);
2036 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
2037 * @proc: proc to decrement
2039 * A binder_proc needs to be kept alive while being used to create or
2040 * handle a transaction. proc->tmp_ref is incremented when
2041 * creating a new transaction or the binder_proc is currently in-use
2042 * by threads that are being released. When done with the binder_proc,
2043 * this function is called to decrement the counter and free the
2044 * proc if appropriate (proc has been released, all threads have
2045 * been released and not currenly in-use to process a transaction).
2047 static void binder_proc_dec_tmpref(struct binder_proc
*proc
)
2049 binder_inner_proc_lock(proc
);
2051 if (proc
->is_dead
&& RB_EMPTY_ROOT(&proc
->threads
) &&
2053 binder_inner_proc_unlock(proc
);
2054 binder_free_proc(proc
);
2057 binder_inner_proc_unlock(proc
);
2061 * binder_get_txn_from() - safely extract the "from" thread in transaction
2062 * @t: binder transaction for t->from
2064 * Atomically return the "from" thread and increment the tmp_ref
2065 * count for the thread to ensure it stays alive until
2066 * binder_thread_dec_tmpref() is called.
2068 * Return: the value of t->from
2070 static struct binder_thread
*binder_get_txn_from(
2071 struct binder_transaction
*t
)
2073 struct binder_thread
*from
;
2075 spin_lock(&t
->lock
);
2078 atomic_inc(&from
->tmp_ref
);
2079 spin_unlock(&t
->lock
);
/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from)
		return NULL;
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
	return NULL;
}
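
/*
 * Illustrative sketch (not part of the driver): how the locked variant
 * above is meant to be consumed. Both the inner lock and the tmp_ref
 * must be released by the caller, in that order:
 *
 *	struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
 *
 *	if (from) {
 *		// from->proc->inner_lock is held here; "from" is pinned
 *		binder_inner_proc_unlock(from->proc);
 *		binder_thread_dec_tmpref(from);
 *	}
 */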
static void binder_free_transaction(struct binder_transaction *t)
{
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				      t->debug_id,
				      target_thread->proc->pid,
				      target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			      t->debug_id);
	}
}
/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			"undelivered transaction %d, %s\n",
			t->debug_id, reason);
		binder_free_transaction(t);
	}
}
/**
 * binder_validate_object() - checks for a valid metadata object in a buffer.
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the buffer at which to validate an object.
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero.
 */
static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
{
	/* Check if we can read a header first */
	struct binder_object_header *hdr;
	size_t object_size = 0;

	if (buffer->data_size < sizeof(*hdr) ||
	    offset > buffer->data_size - sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	/* Ok, now see if we can read a complete object. */
	hdr = (struct binder_object_header *)(buffer->data + offset);
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
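
/*
 * Illustrative layout sketch (not part of the driver): the offsets that
 * binder_validate_object() checks index into the data portion of a
 * transaction buffer, which is laid out as
 *
 *	|<---- data_size ---->|<- pad ->|<---- offsets_size ---->|
 *	[     data bytes      ][ALIGN() ][binder_size_t offsets...]
 *
 * Each offset must be u32-aligned and must leave room for at least a
 * struct binder_object_header before the end of the data area.
 */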
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b:		binder_buffer containing the object
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start:	points to the start of the offset array
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 */
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
							binder_size_t index,
							binder_size_t *start,
							binder_size_t num_valid)
{
	struct binder_buffer_object *buffer_obj;
	binder_size_t *offp;

	if (index >= num_valid)
		return NULL;

	offp = start + index;
	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
		return NULL;

	return buffer_obj;
}
/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b:			transaction buffer
 * @objects_start:	start of objects buffer
 * @buffer:		binder_buffer_object in which to fix up
 * @offset:		start offset in @buffer to fix up
 * @last_obj:		last binder_buffer_object that we fixed up in
 * @last_min_offset:	minimum fixup offset in @last_obj
 *
 * Return:		%true if a fixup in buffer @buffer at offset @offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_buffer *b,
				  binder_size_t *objects_start,
				  struct binder_buffer_object *buffer,
				  binder_size_t fixup_offset,
				  struct binder_buffer_object *last_obj,
				  binder_size_t last_min_offset)
{
	if (!last_obj) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj != buffer) {
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
		last_obj = (struct binder_buffer_object *)
			(b->data + *(objects_start + last_obj->parent));
	}
	return (fixup_offset >= last_min_offset);
}
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_start, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %pK\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start = (binder_size_t *)(buffer->data +
				      ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)off_start + buffer->offsets_size;
	for (offp = off_start; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(buffer, *offp);

		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		hdr = (struct binder_object_header *)(buffer->data + *offp);
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);

			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->fd);
			if (failed_at)
				task_close_fd(proc, fp->fd);
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			uintptr_t parent_buffer;
			u32 *fd_array;
			size_t fd_index;
			binder_size_t fd_buf_size;

			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(buffer, fda->parent,
						     off_start,
						     offp - off_start);
			if (!parent) {
				pr_err("transaction release %d bad parent offset\n",
				       debug_id);
				continue;
			}
			/*
			 * Since the parent was already fixed up, convert it
			 * back to kernel address space to access it
			 */
			parent_buffer = parent->buffer -
				binder_alloc_get_user_buffer_offset(
						&proc->alloc);

			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
				task_close_fd(proc, fd_array[fd_index]);
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}
static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}
static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}
static int binder_translate_fd(int fd,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	int target_fd;
	struct file *file;
	int ret;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
	if (target_fd < 0) {
		ret = -ENOMEM;
		goto err_get_unused_fd;
	}
	task_fd_install(target_proc, target_fd, file);
	trace_binder_transaction_fd(t, fd, target_fd);
	binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
		     fd, target_fd);

	return target_fd;

err_get_unused_fd:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}
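
/*
 * Illustrative sketch (not part of the driver): the life of a passed fd
 * as implemented above, assuming the sender passes fd 7 and the first
 * free descriptor in the target is 3:
 *
 *	file = fget(7);					// pin sender's struct file
 *	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); // 3
 *	task_fd_install(target_proc, 3, file);		// fd 3 now live in target
 *
 * The object in the transaction buffer is then rewritten so that the
 * target reads 3, not 7.
 */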
static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size, num_installed_fds;
	int target_fd;
	uintptr_t parent_buffer;
	u32 *fd_array;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * Since the parent was already fixed up, convert it
	 * back to the kernel address space to access it
	 */
	parent_buffer = parent->buffer -
		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
	fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
						in_reply_to);
		if (target_fd < 0)
			goto err_translate_fd_failed;
		fd_array[fdi] = target_fd;
	}
	return 0;

err_translate_fd_failed:
	/*
	 * Failed to allocate fd or security error, free fds
	 * installed so far.
	 */
	num_installed_fds = fdi;
	for (fdi = 0; fdi < num_installed_fds; fdi++)
		task_close_fd(target_proc, fd_array[fdi]);
	return target_fd;
}
static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t *off_start,
			       binder_size_t num_valid,
			       struct binder_buffer_object *last_fixup_obj,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	u8 *parent_buffer;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(b, off_start,
				   parent, bp->parent_offset,
				   last_fixup_obj,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	parent_buffer = (u8 *)((uintptr_t)parent->buffer -
			binder_alloc_get_user_buffer_offset(
				&target_proc->alloc));
	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;

	return 0;
}
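
/*
 * Illustrative sketch (not part of the driver): what the fixup above
 * accomplishes. When userspace sends a parent struct that embeds a
 * pointer to a child buffer (the child carries
 * BINDER_BUFFER_FLAG_HAS_PARENT), that embedded pointer is only
 * meaningful in the sender's address space, so the driver rewrites it
 * in place. With a hypothetical sender-side layout
 *
 *	struct parent { struct child *kid; };	// bp->parent_offset == 0
 *
 * the store
 *
 *	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
 *
 * patches "kid" to point at the child's location in the target's
 * mapping, bp->buffer having already been fixed up for the target.
 */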
/**
 * binder_proc_transaction() - sends a transaction to a process and wakes it up
 * @t:		transaction to send
 * @proc:	process to send the transaction to
 * @thread:	thread in @proc to send the transaction to (may be NULL)
 *
 * This function queues a transaction to the specified process. It will try
 * to find a thread in the target process to handle the transaction and
 * wake it up. If no thread is found, the work is queued to the proc
 * waitqueue.
 *
 * If the @thread parameter is not NULL, the transaction is always queued
 * to the waitlist of that specific thread.
 *
 * Return:	true if the transaction was successfully queued
 *		false if the target process or thread is dead
 */
static bool binder_proc_transaction(struct binder_transaction *t,
				    struct binder_proc *proc,
				    struct binder_thread *thread)
{
	struct binder_node *node = t->buffer->target_node;
	struct binder_priority node_prio;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool pending_async = false;

	BUG_ON(!node);
	binder_node_lock(node);
	node_prio.prio = node->min_priority;
	node_prio.sched_policy = node->sched_policy;

	if (oneway) {
		BUG_ON(thread);
		if (node->has_async_transaction) {
			pending_async = true;
		} else {
			node->has_async_transaction = true;
		}
	}

	binder_inner_proc_lock(proc);

	if (proc->is_dead || (thread && thread->is_dead)) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		return false;
	}

	if (!thread && !pending_async)
		thread = binder_select_thread_ilocked(proc);

	if (thread) {
		binder_transaction_priority(thread->task, t, node_prio,
					    node->inherit_rt);
		binder_enqueue_thread_work_ilocked(thread, &t->work);
	} else if (!pending_async) {
		binder_enqueue_work_ilocked(&t->work, &proc->todo);
	} else {
		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
	}

	if (!pending_async)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);

	return true;
}
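
/*
 * Illustrative summary (not part of the driver) of the queueing choices
 * made above:
 *
 *	@thread given			-> thread->todo (always)
 *	oneway, none pending on node	-> thread from
 *					   binder_select_thread_ilocked(),
 *					   else proc->todo
 *	oneway, async already pending	-> node->async_todo (no wakeup)
 */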
/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:	struct binder_node for which to get refs
 * @procp:	returns @node->proc if valid
 * @error:	if no @procp then returns BR_DEAD_REPLY
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
 *         Also sets @procp if valid. If @node->proc is NULL, indicating that
 *         the target proc has died, @error is set to BR_DEAD_REPLY.
 */
static struct binder_node *binder_get_node_refs_for_txn(
		struct binder_node *node,
		struct binder_proc **procp,
		uint32_t *error)
{
	struct binder_node *target_node = NULL;

	binder_node_inner_lock(node);
	if (node->proc) {
		target_node = node;
		binder_inc_node_nilocked(node, 1, 0, NULL);
		binder_inc_node_tmpref_ilocked(node);
		node->proc->tmp_ref++;
		*procp = node->proc;
	} else
		*error = BR_DEAD_REPLY;
	binder_node_inner_unlock(node);

	return target_node;
}
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end, *off_start;
	binder_size_t off_min;
	u8 *sg_bufp, *sg_buf_end;
	struct binder_proc *target_proc = NULL;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error = 0;
	uint32_t return_error_param = 0;
	uint32_t return_error_line = 0;
	struct binder_buffer_object *last_fixup_obj = NULL;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	int t_debug_id = atomic_inc_return(&binder_last_id);

	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	e->context_name = proc->context->name;

	if (reply) {
		binder_inner_proc_lock(proc);
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_inner_proc_unlock(proc);
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_empty_call_stack;
		}
		if (in_reply_to->to_thread != thread) {
			spin_lock(&in_reply_to->lock);
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			spin_unlock(&in_reply_to->lock);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		binder_inner_proc_unlock(proc);
		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			binder_inner_proc_unlock(target_thread->proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
		target_proc->tmp_ref++;
		binder_inner_proc_unlock(target_thread->proc);
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, tr->target.handle,
						     true);
			if (ref) {
				target_node = binder_get_node_refs_for_txn(
						ref->node, &target_proc,
						&return_error);
			} else {
				binder_user_error("%d:%d got transaction to invalid handle\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
			}
			binder_proc_unlock(proc);
		} else {
			mutex_lock(&context->context_mgr_node_lock);
			target_node = context->binder_context_mgr_node;
			if (target_node)
				target_node = binder_get_node_refs_for_txn(
						target_node, &target_proc,
						&return_error);
			else
				return_error = BR_DEAD_REPLY;
			mutex_unlock(&context->context_mgr_node_lock);
			if (target_node && target_proc == proc) {
				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_invalid_target_handle;
			}
		}
		if (!target_node) {
			/*
			 * return_error is set above
			 */
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		e->to_node = target_node->debug_id;
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPERM;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		binder_inner_proc_lock(proc);
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				spin_lock(&tmp->lock);
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				spin_unlock(&tmp->lock);
				binder_inner_proc_unlock(proc);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EPROTO;
				return_error_line = __LINE__;
				goto err_bad_call_stack;
			}
			while (tmp) {
				struct binder_thread *from;

				spin_lock(&tmp->lock);
				from = tmp->from;
				if (from && from->proc == target_proc) {
					atomic_inc(&from->tmp_ref);
					target_thread = from;
					spin_unlock(&tmp->lock);
					break;
				}
				spin_unlock(&tmp->lock);
				tmp = tmp->from_parent;
			}
		}
		binder_inner_proc_unlock(proc);
	}
	if (target_thread)
		e->to_thread = target_thread->pid;
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);
	spin_lock_init(&t->lock);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	if (!(t->flags & TF_ONE_WAY) &&
	    binder_supported_policy(current->policy)) {
		/* Inherit supported policies for synchronous transactions */
		t->priority.sched_policy = current->policy;
		t->priority.prio = current->normal_prio;
	} else {
		/* Otherwise, fall back to the default priority */
		t->priority = target_proc->default_priority;
	}

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
	if (IS_ERR(t->buffer)) {
		/*
		 * -ESRCH indicates VMA cleared. The target is dying.
		 */
		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	off_start = (binder_size_t *)(t->buffer->data +
				      ALIGN(tr->data_size, sizeof(void *)));
	offp = off_start;

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	off_end = (void *)off_start + tr->offsets_size;
	sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
	sg_buf_end = sg_bufp + extra_buffers_size;
	off_min = 0;
	for (; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(t->buffer, *offp);

		if (object_size == 0 || *offp < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid, (u64)*offp,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
		off_min = *offp + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			int target_fd = binder_translate_fd(fp->fd, t, thread,
							    in_reply_to);

			if (target_fd < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = target_fd;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			fp->pad_binder = 0;
			fp->fd = target_fd;
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			struct binder_buffer_object *parent =
				binder_validate_ptr(t->buffer, fda->parent,
						    off_start,
						    offp - off_start);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(t->buffer, off_start,
						   parent, fda->parent_offset,
						   last_fixup_obj,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(fda, parent, t, thread,
							in_reply_to);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = parent;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end - sg_bufp;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			if (copy_from_user(sg_bufp,
					   (const void __user *)(uintptr_t)
					   bp->buffer, bp->length)) {
				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
						  proc->pid, thread->pid);
				return_error_param = -EFAULT;
				return_error = BR_FAILED_REPLY;
				return_error_line = __LINE__;
				goto err_copy_data_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)sg_bufp +
				binder_alloc_get_user_buffer_offset(
						&target_proc->alloc);
			sg_bufp += ALIGN(bp->length, sizeof(u64));

			ret = binder_fixup_parent(t, thread, bp, off_start,
						  offp - off_start,
						  last_fixup_obj,
						  last_fixup_min_off);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = bp;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_object_type;
		}
	}
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	t->work.type = BINDER_WORK_TRANSACTION;

	if (reply) {
		binder_enqueue_thread_work(thread, tcomplete);
		binder_inner_proc_lock(target_proc);
		if (target_thread->is_dead) {
			binder_inner_proc_unlock(target_proc);
			goto err_dead_proc_or_thread;
		}
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction_ilocked(target_thread, in_reply_to);
		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
		binder_inner_proc_unlock(target_proc);
		wake_up_interruptible_sync(&target_thread->wait);
		binder_restore_priority(current, in_reply_to->saved_priority);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_inner_proc_lock(proc);
		/*
		 * Defer the TRANSACTION_COMPLETE, so we don't return to
		 * userspace immediately; this allows the target process to
		 * immediately start processing this transaction, reducing
		 * latency. We will then return the TRANSACTION_COMPLETE when
		 * the target replies (or there is an error).
		 */
		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
		binder_inner_proc_unlock(proc);
		if (!binder_proc_transaction(t, target_proc, target_thread)) {
			binder_inner_proc_lock(proc);
			binder_pop_transaction_ilocked(thread, t);
			binder_inner_proc_unlock(proc);
			goto err_dead_proc_or_thread;
		}
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		binder_enqueue_thread_work(thread, tcomplete);
		if (!binder_proc_transaction(t, target_proc, NULL))
			goto err_dead_proc_or_thread;
	}
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	binder_proc_dec_tmpref(target_proc);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
	return;

err_dead_proc_or_thread:
	return_error = BR_DEAD_REPLY;
	return_error_line = __LINE__;
	binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, t->buffer, offp);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	target_node = NULL;
	t->buffer->transaction = NULL;
	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	if (target_proc)
		binder_proc_dec_tmpref(target_proc);
	if (target_node) {
		binder_dec_node(target_node, 1, 0);
		binder_dec_node_tmpref(target_node);
	}

	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
		     proc->pid, thread->pid, return_error, return_error_param,
		     (u64)tr->data_size, (u64)tr->offsets_size,
		     return_error_line);

	{
		struct binder_transaction_log_entry *fe;

		e->return_error = return_error;
		e->return_error_param = return_error_param;
		e->return_error_line = return_error_line;
		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
		smp_wmb();
		WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	}

	BUG_ON(thread->return_error.cmd != BR_OK);
	if (in_reply_to) {
		binder_restore_priority(current, in_reply_to->saved_priority);
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
		thread->return_error.cmd = return_error;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
	}
}
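
/*
 * Illustrative userspace sketch (not part of the driver): the command
 * stream that reaches binder_transaction() above via BINDER_WRITE_READ.
 * "fd" is an open binder device descriptor, "handle" a previously
 * obtained remote handle, and "args" a hypothetical payload; error
 * handling is omitted.
 *
 *	struct binder_transaction_data txn = {
 *		.target.handle = handle,
 *		.code = 1,				// hypothetical method code
 *		.flags = 0,				// synchronous call
 *		.data_size = sizeof(args),
 *		.data.ptr.buffer = (binder_uintptr_t)&args,
 *	};
 *	uint32_t cmd = BC_TRANSACTION;
 *	char wbuf[sizeof(cmd) + sizeof(txn)];
 *
 *	memcpy(wbuf, &cmd, sizeof(cmd));
 *	memcpy(wbuf + sizeof(cmd), &txn, sizeof(txn));
 *
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(wbuf),
 *		.write_buffer = (binder_uintptr_t)wbuf,
 *	};
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 */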
static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error.cmd == BR_OK) {
		int ret;

		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			const char *debug_string;
			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
			struct binder_ref_data rdata;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(uint32_t);
			ret = -1;
			if (increment && !target) {
				struct binder_node *ctx_mgr_node;
				mutex_lock(&context->context_mgr_node_lock);
				ctx_mgr_node = context->binder_context_mgr_node;
				if (ctx_mgr_node)
					ret = binder_inc_ref_for_node(
							proc, ctx_mgr_node,
							strong, NULL, &rdata);
				mutex_unlock(&context->context_mgr_node_lock);
			}
			if (ret)
				ret = binder_update_ref_for_handle(
						proc, target, increment, strong,
						&rdata);
			if (!ret && rdata.desc != target) {
				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
					proc->pid, thread->pid,
					target, rdata.desc);
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				break;
			case BC_RELEASE:
				debug_string = "Release";
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				break;
			}
			if (ret) {
				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
					proc->pid, thread->pid, debug_string,
					strong, target, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d\n",
				     proc->pid, thread->pid, debug_string,
				     rdata.debug_id, rdata.desc, rdata.strong,
				     rdata.weak);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;
			bool free_node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" :
					"BC_ACQUIRE_DONE",
					(u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					(u64)node_ptr, node->debug_id,
					(u64)cookie, (u64)node->cookie);
				binder_put_node(node);
				break;
			}
			binder_node_inner_lock(node);
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_weak_ref = 0;
			}
			free_node = binder_dec_node_nilocked(node,
					cmd == BC_ACQUIRE_DONE, 0);
			WARN_ON(free_node);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d tr %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs,
				     node->local_weak_refs, node->tmp_refs);
			binder_node_inner_unlock(node);
			binder_put_node(node);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_alloc_prepare_to_free(&proc->alloc,
							      data_ptr);
			if (buffer == NULL) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			if (!buffer->allow_user_free) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");

			if (buffer->transaction) {
				buffer->transaction->buffer = NULL;
				buffer->transaction = NULL;
			}
			if (buffer->async_transaction && buffer->target_node) {
				struct binder_node *buf_node;
				struct binder_work *w;

				buf_node = buffer->target_node;
				binder_node_inner_lock(buf_node);
				BUG_ON(!buf_node->has_async_transaction);
				BUG_ON(buf_node->proc != proc);
				w = binder_dequeue_work_head_ilocked(
						&buf_node->async_todo);
				if (!w) {
					buf_node->has_async_transaction = false;
				} else {
					binder_enqueue_work_ilocked(
							w, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
				binder_node_inner_unlock(buf_node);
			}
			trace_binder_transaction_buffer_release(buffer);
			binder_transaction_buffer_release(proc, buffer, NULL);
			binder_alloc_free_buf(&proc->alloc, buffer);
			break;
		}

		case BC_TRANSACTION_SG:
		case BC_REPLY_SG: {
			struct binder_transaction_data_sg tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr.transaction_data,
					   cmd == BC_REPLY_SG, tr.buffers_size);
			break;
		}
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}

		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			binder_inner_proc_lock(proc);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			binder_inner_proc_unlock(proc);
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death = NULL;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/*
				 * Allocate memory for death notification
				 * before taking lock
				 */
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					WARN_ON(thread->return_error.cmd !=
						BR_OK);
					thread->return_error.cmd = BR_ERROR;
					binder_enqueue_thread_work(
						thread,
						&thread->return_error.work);
					binder_debug(
						BINDER_DEBUG_FAILED_TRANSACTION,
						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						proc->pid, thread->pid);
					break;
				}
			}
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				binder_proc_unlock(proc);
				kfree(death);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->data.debug_id,
				     ref->data.desc, ref->data.strong,
				     ref->data.weak, ref->node->debug_id);

			binder_node_lock(ref->node);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					kfree(death);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;

					binder_inner_proc_lock(proc);
					binder_enqueue_work_ilocked(
						&ref->death->work, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
					binder_inner_proc_unlock(proc);
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
						proc->pid, thread->pid,
						(u64)death->cookie,
						(u64)cookie);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				ref->death = NULL;
				binder_inner_proc_lock(proc);
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_thread_work_ilocked(
								thread,
								&death->work);
					else {
						binder_enqueue_work_ilocked(
								&death->work,
								&proc->todo);
						binder_wakeup_proc_ilocked(
								proc);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
				binder_inner_proc_unlock(proc);
			}
			binder_node_unlock(ref->node);
			binder_proc_unlock(proc);
		} break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			binder_inner_proc_lock(proc);
			list_for_each_entry(w, &proc->delivered_death,
					    entry) {
				struct binder_ref_death *tmp_death =
					container_of(w,
						     struct binder_ref_death,
						     work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
					proc->pid, thread->pid, (u64)cookie);
				binder_inner_proc_unlock(proc);
				break;
			}
			binder_dequeue_work_ilocked(&death->work);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper &
				    (BINDER_LOOPER_STATE_REGISTERED |
				     BINDER_LOOPER_STATE_ENTERED))
					binder_enqueue_thread_work_ilocked(
						thread, &death->work);
				else {
					binder_enqueue_work_ilocked(
							&death->work,
							&proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
			}
			binder_inner_proc_unlock(proc);
		} break;

		default:
			pr_err("%d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}
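
/*
 * Illustrative note (not part of the driver): the write buffer parsed
 * above is a packed stream of u32 command words, each followed by that
 * command's payload, e.g.
 *
 *	[BC_FREE_BUFFER][binder_uintptr_t data_ptr]
 *	[BC_TRANSACTION][struct binder_transaction_data]
 *
 * *consumed is advanced after every command so that a partially
 * processed stream can be resumed by the next BINDER_WRITE_READ call.
 */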
static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
	}
}
static int binder_put_node_cmd(struct binder_proc *proc,
			       struct binder_thread *thread,
			       void __user **ptrp,
			       binder_uintptr_t node_ptr,
			       binder_uintptr_t node_cookie,
			       int node_debug_id,
			       uint32_t cmd, const char *cmd_name)
{
	void __user *ptr = *ptrp;

	if (put_user(cmd, (uint32_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(uint32_t);

	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	binder_stat_br(proc, thread, cmd);
	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
		     proc->pid, thread->pid, cmd_name, node_debug_id,
		     (u64)node_ptr, (u64)node_cookie);

	*ptrp = ptr;
	return 0;
}
static int binder_wait_for_work(struct binder_thread *thread,
				bool do_proc_work)
{
	DEFINE_WAIT(wait);
	struct binder_proc *proc = thread->proc;
	int ret = 0;

	freezer_do_not_count();
	binder_inner_proc_lock(proc);
	for (;;) {
		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
		if (binder_has_work_ilocked(thread, do_proc_work))
			break;
		if (do_proc_work)
			list_add(&thread->waiting_thread_node,
				 &proc->waiting_threads);
		binder_inner_proc_unlock(proc);
		schedule();
		binder_inner_proc_lock(proc);
		list_del_init(&thread->waiting_thread_node);
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&thread->wait, &wait);
	binder_inner_proc_unlock(proc);
	freezer_count();

	return ret;
}
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_restore_priority(current, proc->default_priority);
	}

	if (non_block) {
		if (!binder_has_work(thread, wait_for_proc_work))
			ret = -EAGAIN;
	} else {
		ret = binder_wait_for_work(thread, wait_for_proc_work);
	}

	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;

		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			   wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);
		if (binder_worklist_empty_ilocked(&thread->todo))
			thread->process_todo = false;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			WARN_ON(e->cmd == BR_OK);
			binder_inner_proc_unlock(proc);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_inner_proc_unlock(proc);
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			int strong, weak;
			binder_uintptr_t node_ptr = node->ptr;
			binder_uintptr_t node_cookie = node->cookie;
			int node_debug_id = node->debug_id;
			int has_weak_ref;
			int has_strong_ref;
			void __user *orig_ptr = ptr;

			BUG_ON(proc != node->proc);
			strong = node->internal_strong_refs ||
					node->local_strong_refs;
			weak = !hlist_empty(&node->refs) ||
					node->local_weak_refs ||
					node->tmp_refs || strong;
			has_strong_ref = node->has_strong_ref;
			has_weak_ref = node->has_weak_ref;

			if (weak && !has_weak_ref) {
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			}
			if (strong && !has_strong_ref) {
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			}
			if (!strong && has_strong_ref)
				node->has_strong_ref = 0;
			if (!weak && has_weak_ref)
				node->has_weak_ref = 0;
			if (!weak && !strong) {
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx deleted\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_inner_proc_unlock(proc);
				binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
				binder_node_unlock(node);
				binder_free_node(node);
			} else
				binder_inner_proc_unlock(proc);

			if (weak && !has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_INCREFS, "BR_INCREFS");
			if (!ret && strong && !has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_ACQUIRE, "BR_ACQUIRE");
			if (!ret && !strong && has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_RELEASE, "BR_RELEASE");
			if (!ret && !weak && has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_DECREFS, "BR_DECREFS");
			if (orig_ptr == ptr)
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
			if (ret)
				return ret;
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			cookie = death->cookie;

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)cookie);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				binder_inner_proc_unlock(proc);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;
			struct binder_priority node_prio;

			tr.target.ptr = target_node->ptr;
			tr.cookie = target_node->cookie;
			node_prio.sched_policy = target_node->sched_policy;
			node_prio.prio = target_node->min_priority;
			binder_transaction_priority(current, t, node_prio,
						    target_node->inherit_rt);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = 0;
			tr.cookie = 0;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			tr.sender_pid = task_tgid_nr_ns(sender,
							task_active_pid_ns(current));
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		tr.data.ptr.buffer = (binder_uintptr_t)
			((uintptr_t)t->buffer->data +
			binder_alloc_get_user_buffer_offset(&proc->alloc));
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "put_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr))) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "copy_to_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(tr);

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	binder_inner_proc_lock(proc);
	if (proc->requested_threads == 0 &&
	    list_empty(&thread->proc->waiting_threads) &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
	     /* spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}
static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;

	while (1) {
		w = binder_dequeue_work_head(proc, list);
		if (!w)
			return;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);

			binder_cleanup_transaction(t, "process died.",
						   BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_ERROR: %u\n",
				e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}
}
static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	get_task_struct(current);
	thread->task = current;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}
static void binder_free_proc(struct binder_proc *proc)
{
	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}
static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	put_task_struct(thread->task);
	kfree(thread);
}
static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
	}

	/*
	 * If this thread used poll, make sure we remove the waitqueue
	 * from any epoll data structures holding it with POLLFREE.
	 * waitqueue_active() is safe to use here because we're holding
	 * the inner lock.
	 */
	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
	    waitqueue_active(&thread->wait)) {
		wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
	}

	binder_inner_proc_unlock(thread->proc);

	/*
	 * This is needed to avoid races between wake_up_poll() above and
	 * ep_remove_waitqueue() called for other reasons (eg the epoll file
	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
	 * lock, so we can be sure it's done after calling synchronize_rcu().
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		synchronize_rcu();

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}
static unsigned int binder_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	bool wait_for_proc_work;

	thread = binder_get_thread(proc);
	if (!thread)
		return POLLERR;

	binder_inner_proc_lock(thread->proc);
	thread->looper |= BINDER_LOOPER_STATE_POLL;
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);

	binder_inner_proc_unlock(thread->proc);

	poll_wait(filp, &thread->wait, wait);

	if (binder_has_work(thread, wait_for_proc_work))
		return POLLIN;

	return 0;
}
static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, NULL);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}
static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
				struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;

	memset(info, 0, sizeof(*info));

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
			info->ptr = node->ptr;
			info->cookie = node->cookie;
			info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		int max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_DEBUG_INFO: {
		struct binder_node_debug_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}
static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}

static int binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
	if (ret)
		return ret;
	mutex_lock(&proc->files_lock);
	proc->files = get_files_struct(current);
	mutex_unlock(&proc->files_lock);
	return 0;

err_bad_arg:
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;
	struct binder_device *binder_dev;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	mutex_init(&proc->files_lock);
	INIT_LIST_HEAD(&proc->todo);
	if (binder_supported_policy(current->policy)) {
		proc->default_priority.sched_policy = current->policy;
		proc->default_priority.prio = current->normal_prio;
	} else {
		proc->default_priority.sched_policy = SCHED_NORMAL;
		proc->default_priority.prio = NICE_TO_PRIO(0);
	}

	binder_dev = container_of(filp->private_data, struct binder_device,
				  miscdev);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	INIT_LIST_HEAD(&proc->waiting_threads);
	filp->private_data = proc;

	mutex_lock(&binder_procs_lock);
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * anyway print all contexts that a given PID has, so this
		 * is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&binder_proc_fops);
	}

	return 0;
}
static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}
static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}
static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node,
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
		 * with new notification requests and the
		 * inner lock to synchronize with queued
		 * death notifications.
		 */
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;

		BUG_ON(!list_empty(&ref->death->work.entry));
		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);
	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	BUG_ON(proc->files);

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			mutex_lock(&proc->files_lock);
			files = proc->files;
			if (files)
				proc->files = NULL;
			mutex_unlock(&proc->files_lock);
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		if (files)
			put_files_struct(files);
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
				&binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}
static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;

	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority.sched_policy,
		   t->priority.prio, t->need_reply);
	spin_unlock(&t->lock);

	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %pK\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->data);
}
static void print_binder_work_ilocked(struct seq_file *m,
				      struct binder_proc *proc,
				      const char *prefix,
				      const char *transaction_prefix,
				      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}
static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}
static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->sched_policy, node->min_priority,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					"    pending async transaction", w);
	}
}
static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							struct binder_ref,
							rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}
static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}
static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}
static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_names, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;

	ret = binder_alloc_shrinker_init();
	if (ret)
		return ret;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    0444,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    0444,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	/*
	 * Copy the module_parameter string, because we don't want to
	 * tokenize it in-place.
	 */
	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}
	strcpy(device_names, binder_devices_param);

	device_tmp = device_names;
	while ((device_name = strsep(&device_tmp, ","))) {
		ret = init_binder_device(device_name);
		if (ret)
			goto err_init_binder_device_failed;
	}

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);
#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");