/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel.
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack.
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel.
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate the required
 * lock in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 */
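/*
 * Illustrative sketch (editorial addition, not part of the upstream
 * driver): a caller needing a foo_nilocked() helper takes the locks in
 * the documented order and releases them in reverse:
 *
 *	binder_node_lock(node);
 *	if (node->proc)
 *		binder_inner_proc_lock(node->proc);
 *	foo_nilocked(node);             (hypothetical helper)
 *	if (node->proc)
 *		binder_inner_proc_unlock(node->proc);
 *	binder_node_unlock(node);
 *
 * binder_node_inner_lock()/binder_node_inner_unlock() below wrap
 * exactly this pattern.
 */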
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#include <linux/sched/rt.h>
#define NICE_WIDTH (MAX_NICE - MIN_NICE + 1)
#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"
static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;
static struct workqueue_struct *binder_deferred_workqueue;
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}
struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};
struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
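/*
 * Illustrative sketch (editorial addition, not part of the upstream
 * driver): a reader pairs with the smp_wmb() above by sampling
 * debug_id_done before copying the entry and re-checking it afterwards,
 * so a torn snapshot of an entry being rewritten can be detected. The
 * helper name is hypothetical.
 */
static bool __maybe_unused
binder_example_read_log_entry(struct binder_transaction_log_entry *e,
			      struct binder_transaction_log_entry *copy)
{
	int done = READ_ONCE(e->debug_id_done);

	/* pairs with the smp_wmb() in binder_transaction_log_add() */
	smp_rmb();
	*copy = *e;
	/* a still-zero or changed debug_id_done means an incomplete entry */
	return done && done == READ_ONCE(e->debug_id_done);
}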
struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;

	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};
/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};
/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @sched_policy:         minimum scheduling policy for node
 *                        (invariant after initialized)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @inherit_rt:           inherit RT scheduling policy from caller
 * @txn_security_ctx:     require sender's security context
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 sched_policy:2;
		u8 inherit_rt:1;
		u8 accept_fds:1;
		u8 txn_security_ctx:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};
struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};
/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:        binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:  list entry for node->refs list in target node
 *               (protected by @node->lock)
 * @proc:        binder_proc containing ref
 * @node:        binder_node of target node. When cleaning up a
 *               ref for deletion in binder_cleanup_ref, a non-NULL
 *               @node indicates the node must be freed
 * @death:       pointer to death notification (ref_death) if requested
 *               (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};
enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};
/**
 * struct binder_priority - scheduler policy and priority
 * @sched_policy:    scheduler policy
 * @prio:            [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
 *
 * The binder driver supports inheriting the following scheduler policies:
 * SCHED_NORMAL
 * SCHED_BATCH
 * SCHED_FIFO
 * SCHED_RR
 */
struct binder_priority {
	unsigned int sched_policy;
	int prio;
};
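/*
 * Worked example (editorial addition): with MAX_RT_PRIO == 100,
 * NICE_TO_PRIO(0) == 120, so a SCHED_NORMAL task at nice 0 has
 * @prio 120 and the full nice range -20..19 maps to 100..139.
 * An RT task at the highest RT priority (99) has kernel @prio
 * MAX_USER_RT_PRIO - 1 - 99 == 0.
 */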
/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (protected by @files_lock)
 * @files_lock:           mutex to protect @files
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notification
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct mutex files_lock;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	struct binder_priority default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};
enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};
/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 * @task:                 struct task_struct for this thread
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
	struct task_struct *task;
};
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	struct binder_priority priority;
	struct binder_priority saved_priority;
	bool set_priority_called;
	kuid_t sender_euid;
	binder_uintptr_t security_ctx;
	/**
	 * @lock: protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release spinlock for given binder_proc
 * @proc:         struct binder_proc being released
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc being released
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node being released
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node being released
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	spin_unlock(&node->lock);
}
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}
/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}
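/*
 * Illustrative sketch (editorial addition, not part of the upstream
 * driver): the only difference between the two ilocked thread-enqueue
 * helpers above is the process_todo flag. Queueing with the deferred
 * variant leaves a sleeping thread asleep until a later enqueue sets
 * the flag. The helper name is hypothetical.
 */
static void __maybe_unused
binder_example_enqueue_deferred(struct binder_thread *thread,
				struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	/* work is queued, but the thread is not told to process it yet */
	binder_enqueue_deferred_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}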
/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to the dequeued binder_work, or NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	unsigned long rlim_cur;
	unsigned long irqs;
	int ret;

	mutex_lock(&proc->files_lock);
	if (proc->files == NULL) {
		ret = -ESRCH;
		goto err;
	}
	if (!lock_task_sighand(proc->tsk, &irqs)) {
		ret = -EMFILE;
		goto err;
	}
	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
err:
	mutex_unlock(&proc->files_lock);
	return ret;
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	mutex_lock(&proc->files_lock);
	if (proc->files)
		__fd_install(proc->files, fd, file);
	mutex_unlock(&proc->files_lock);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	mutex_lock(&proc->files_lock);
	if (proc->files == NULL) {
		retval = -ESRCH;
		goto err;
	}
	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;
err:
	mutex_unlock(&proc->files_lock);
	return retval;
}
static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);
	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}
static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
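/*
 * Illustrative sketch (editorial addition, not part of the upstream
 * driver): the select-then-wake pattern used by callers that enqueue
 * process work while holding the inner lock. The helper name is
 * hypothetical.
 */
static void __maybe_unused
binder_example_queue_proc_work_ilocked(struct binder_proc *proc,
				       struct binder_work *work)
{
	/* caller holds proc->inner_lock */
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_enqueue_work_ilocked(work, &proc->todo);
	/* wake the selected waiter, or poll()ing threads if none waits */
	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}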
static bool is_rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static bool is_fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static bool binder_supported_policy(int policy)
{
	return is_fair_policy(policy) || is_rt_policy(policy);
}

static int to_userspace_prio(int policy, int kernel_priority)
{
	if (is_fair_policy(policy))
		return PRIO_TO_NICE(kernel_priority);
	else
		return MAX_USER_RT_PRIO - 1 - kernel_priority;
}

static int to_kernel_prio(int policy, int user_priority)
{
	if (is_fair_policy(policy))
		return NICE_TO_PRIO(user_priority);
	else
		return MAX_USER_RT_PRIO - 1 - user_priority;
}
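/*
 * Editorial note: for a fixed policy these two helpers are inverses.
 * E.g. to_userspace_prio(SCHED_NORMAL, 120) == PRIO_TO_NICE(120) == 0
 * and to_kernel_prio(SCHED_NORMAL, 0) == NICE_TO_PRIO(0) == 120; for
 * SCHED_FIFO, 99 - (99 - prio) == prio.
 */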
static void binder_do_set_priority(struct task_struct *task,
				   struct binder_priority desired,
				   bool verify)
{
	int priority; /* user-space prio value */
	bool has_cap_nice;
	unsigned int policy = desired.sched_policy;

	if (task->policy == policy && task->normal_prio == desired.prio)
		return;

	has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);

	priority = to_userspace_prio(policy, desired.prio);

	if (verify && is_rt_policy(policy) && !has_cap_nice) {
		long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);

		if (max_rtprio == 0) {
			policy = SCHED_NORMAL;
			priority = MIN_NICE;
		} else if (priority > max_rtprio) {
			priority = max_rtprio;
		}
	}

	if (verify && is_fair_policy(policy) && !has_cap_nice) {
		long min_nice = (MAX_NICE - task_rlimit(task, RLIMIT_NICE) + 1);

		if (min_nice > MAX_NICE) {
			binder_user_error("%d RLIMIT_NICE not set\n",
					  task->pid);
			return;
		} else if (priority < min_nice) {
			priority = min_nice;
		}
	}

	if (policy != desired.sched_policy ||
	    to_kernel_prio(policy, priority) != desired.prio)
		binder_debug(BINDER_DEBUG_PRIORITY_CAP,
			     "%d: priority %d not allowed, using %d instead\n",
			      task->pid, desired.prio,
			      to_kernel_prio(policy, priority));

	trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
				  to_kernel_prio(policy, priority),
				  desired.prio);

	/* Set the actual priority */
	if (task->policy != policy || is_rt_policy(policy)) {
		struct sched_param params;

		params.sched_priority = is_rt_policy(policy) ? priority : 0;

		sched_setscheduler_nocheck(task,
					   policy | SCHED_RESET_ON_FORK,
					   &params);
	}
	if (is_fair_policy(policy))
		set_user_nice(task, priority);
}

static void binder_set_priority(struct task_struct *task,
				struct binder_priority desired)
{
	binder_do_set_priority(task, desired, /* verify = */ true);
}

static void binder_restore_priority(struct task_struct *task,
				    struct binder_priority desired)
{
	binder_do_set_priority(task, desired, /* verify = */ false);
}
static void binder_transaction_priority(struct task_struct *task,
					struct binder_transaction *t,
					struct binder_priority node_prio,
					bool inherit_rt)
{
	struct binder_priority desired_prio = t->priority;

	if (t->set_priority_called)
		return;

	t->set_priority_called = true;
	t->saved_priority.sched_policy = task->policy;
	t->saved_priority.prio = task->normal_prio;

	if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
		desired_prio.prio = NICE_TO_PRIO(0);
		desired_prio.sched_policy = SCHED_NORMAL;
	}

	if (node_prio.prio < t->priority.prio ||
	    (node_prio.prio == t->priority.prio &&
	     node_prio.sched_policy == SCHED_FIFO)) {
		/*
		 * In case the minimum priority on the node is
		 * higher (lower value), use that priority. If
		 * the priority is the same, but the node uses
		 * SCHED_FIFO, prefer SCHED_FIFO, since it can
		 * run unbounded, unlike SCHED_RR.
		 */
		desired_prio = node_prio;
	}

	binder_set_priority(task, desired_prio);
}
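/*
 * Worked example (editorial addition): if the caller runs SCHED_NORMAL
 * at nice 10, then t->priority.prio == NICE_TO_PRIO(10) == 130. If the
 * node was published with a minimum of SCHED_NORMAL nice 0, then
 * node_prio.prio == 120, 120 < 130 holds, and the transaction runs at
 * the node's stronger minimum, prio 120.
 */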
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}
static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;
	s8 priority;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
		FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
	node->min_priority = to_kernel_prio(node->sched_policy, priority);
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->
				      binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			binder_dequeue_work_ilocked(&node->work);
			/*
			 * Note: this function is the only place where we queue
			 * directly to a thread->todo without using the
			 * corresponding binder_enqueue_thread_work() helper
			 * functions; in this case it's ok to not set the
			 * process_todo flag, since we know this node work will
			 * always be followed by other work that starts queue
			 * processing: in case of synchronous transactions, a
			 * BR_REPLY or BR_ERROR; in case of oneway
			 * transactions, a BR_TRANSACTION_COMPLETE.
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
				!hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}
static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}
/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		      node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
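/*
 * Worked example (editorial addition): descs ascend from 1 (0 is
 * reserved for the context manager node). If the proc already holds
 * descs {1, 2, 4}, the scan above walks 1 -> 2, 2 -> 3, then sees
 * 4 > 3 and breaks, so the new ref gets the lowest free desc, 3.
 */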
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->data.debug_id,
			      ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}
/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}
/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}
/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}
/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}
/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}
/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}
/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from)
		return NULL;
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
	return NULL;
}
static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				WARN(1, "Unexpected reply error: %u\n",
				     target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}
/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			"undelivered transaction %d, %s\n",
			t->debug_id, reason);
		binder_free_transaction(t);
	}
}
/**
 * binder_validate_object() - checks for a valid metadata object in a buffer.
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the buffer at which to validate an object.
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero.
 */
static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
{
	/* Check if we can read a header first */
	struct binder_object_header *hdr;
	size_t object_size = 0;

	if (offset > buffer->data_size - sizeof(*hdr) ||
	    buffer->data_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	/* Ok, now see if we can read a complete object. */
	hdr = (struct binder_object_header *)(buffer->data + offset);
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
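/*
 * The validation above relies on the transaction buffer layout that
 * binder_transaction() constructs further down (sketch):
 *
 *	buffer->data
 *	+----------------------------------+ offset 0
 *	| transaction data, including the  |
 *	| objects validated here           |
 *	+----------------------------------+ ALIGN(data_size, sizeof(void *))
 *	| offsets array (one binder_size_t |
 *	| per object in the data area)     |
 *	+----------------------------------+ ... + offsets_size
 *	| extra buffers: scatter-gather    |
 *	| payloads and security context    |
 *	+----------------------------------+
 */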
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b:		binder_buffer containing the object
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start:	points to the start of the offset array
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 */
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
							binder_size_t index,
							binder_size_t *start,
							binder_size_t num_valid)
{
	struct binder_buffer_object *buffer_obj;
	binder_size_t *offp;

	if (index >= num_valid)
		return NULL;

	offp = start + index;
	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
		return NULL;

	return buffer_obj;
}
/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b:			transaction buffer
 * @objects_start:	start of objects buffer
 * @buffer:		binder_buffer_object in which to fix up
 * @offset:		start offset in @buffer to fix up
 * @last_obj:		last binder_buffer_object that we fixed up in
 * @last_min_offset:	minimum fixup offset in @last_obj
 *
 * Return:		%true if a fixup in buffer @buffer at offset @offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_buffer *b,
				  binder_size_t *objects_start,
				  struct binder_buffer_object *buffer,
				  binder_size_t fixup_offset,
				  struct binder_buffer_object *last_obj,
				  binder_size_t last_min_offset)
{
	if (!last_obj) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj != buffer) {
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
		last_obj = (struct binder_buffer_object *)
			(b->data + *(objects_start + last_obj->parent));
	}
	return (fixup_offset >= last_min_offset);
}
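/*
 * Worked example for the "allowed" case in the comment above: when E
 * (parent = A, offset = 32) is fixed up, last_obj is D and the walk
 * follows parents D -> C until it reaches A, the buffer being fixed up
 * in. The final hop through C raises last_min_offset to
 * C.parent_offset + sizeof(uintptr_t), so the fixup at offset 32 is
 * accepted only because it lies past the pointer already patched into A
 * at offset 16.
 */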
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_start, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start = (binder_size_t *)(buffer->data +
				      ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)off_start + buffer->offsets_size;
	for (offp = off_start; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(buffer, *offp);

		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		hdr = (struct binder_object_header *)(buffer->data + *offp);
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);

			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->fd);
			if (failed_at)
				task_close_fd(proc, fp->fd);
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			uintptr_t parent_buffer;
			u32 *fd_array;
			size_t fd_index;
			binder_size_t fd_buf_size;

			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(buffer, fda->parent,
						     off_start,
						     offp - off_start);
			if (!parent) {
				pr_err("transaction release %d bad parent offset",
				       debug_id);
				continue;
			}
			/*
			 * Since the parent was already fixed up, convert it
			 * back to kernel address space to access it
			 */
			parent_buffer = parent->buffer -
				binder_alloc_get_user_buffer_offset(
						&proc->alloc);

			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
				task_close_fd(proc, fd_array[fd_index]);
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}
static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}
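/*
 * Effect of the translation above on the wire format (sketch): the sender
 * passes a flat_binder_object describing one of its own nodes,
 *
 *	{ .hdr.type = BINDER_TYPE_BINDER, .binder = ptr, .cookie = cookie }
 *
 * and the receiver sees the same object rewritten as a reference it can
 * use with its own handle table:
 *
 *	{ .hdr.type = BINDER_TYPE_HANDLE, .handle = rdata.desc }
 *
 * binder_translate_handle() below performs the inverse mapping, including
 * the handle-to-local-node case when an object returns to its owner.
 */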
static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}
static int binder_translate_fd(int fd,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	int target_fd;
	struct file *file;
	int ret;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
	if (target_fd < 0) {
		ret = -ENOMEM;
		goto err_get_unused_fd;
	}
	task_fd_install(target_proc, target_fd, file);
	trace_binder_transaction_fd(t, fd, target_fd);
	binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
		     fd, target_fd);

	return target_fd;

err_get_unused_fd:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}
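/*
 * fd translation summary: the sender's fd is resolved to a struct file,
 * checked against the target's TF_ACCEPT_FDS policy and the LSM hook, and
 * then installed into the *target* process's fd table with O_CLOEXEC. The
 * returned target_fd is what gets written back into the object in the
 * transaction buffer; if the transaction later fails, the fds installed
 * so far are closed again via task_close_fd().
 */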
static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size, num_installed_fds;
	int target_fd;
	uintptr_t parent_buffer;
	u32 *fd_array;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * Since the parent was already fixed up, convert it
	 * back to the kernel address space to access it
	 */
	parent_buffer = parent->buffer -
		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
	fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
						in_reply_to);
		if (target_fd < 0)
			goto err_translate_fd_failed;
		fd_array[fdi] = target_fd;
	}
	return 0;

err_translate_fd_failed:
	/*
	 * Failed to allocate fd or security error, free fds
	 * installed so far.
	 */
	num_installed_fds = fdi;
	for (fdi = 0; fdi < num_installed_fds; fdi++)
		task_close_fd(target_proc, fd_array[fdi]);
	return target_fd;
}
static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t *off_start,
			       binder_size_t num_valid,
			       struct binder_buffer_object *last_fixup_obj,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	u8 *parent_buffer;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(b, off_start,
				   parent, bp->parent_offset,
				   last_fixup_obj,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	parent_buffer = (u8 *)((uintptr_t)parent->buffer -
			binder_alloc_get_user_buffer_offset(
				&target_proc->alloc));
	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;

	return 0;
}
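/*
 * binder_fixup_parent() is what makes scatter-gather (BC_TRANSACTION_SG)
 * work: a BINDER_TYPE_PTR child records the index of its parent buffer
 * object plus an offset, and after the child's payload has been copied to
 * its final location in the target, the pointer slot inside the parent is
 * patched (sketch; parent_data stands for the parent's payload as mapped
 * in the kernel):
 *
 *	*(binder_uintptr_t *)(parent_data + bp->parent_offset) = bp->buffer;
 *
 * where bp->buffer has already been rewritten to the target's address
 * space by the BINDER_TYPE_PTR case in binder_transaction() below.
 */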
/**
 * binder_proc_transaction() - sends a transaction to a process and wakes it up
 * @t:		transaction to send
 * @proc:	process to send the transaction to
 * @thread:	thread in @proc to send the transaction to (may be NULL)
 *
 * This function queues a transaction to the specified process. It will try
 * to find a thread in the target process to handle the transaction and
 * wake it up. If no thread is found, the work is queued to the proc
 * waitqueue.
 *
 * If the @thread parameter is not NULL, the transaction is always queued
 * to the waitlist of that specific thread.
 *
 * Return:	true if the transaction was successfully queued
 *		false if the target process or thread is dead
 */
static bool binder_proc_transaction(struct binder_transaction *t,
				    struct binder_proc *proc,
				    struct binder_thread *thread)
{
	struct binder_node *node = t->buffer->target_node;
	struct binder_priority node_prio;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool pending_async = false;

	BUG_ON(!node);
	binder_node_lock(node);
	node_prio.prio = node->min_priority;
	node_prio.sched_policy = node->sched_policy;

	if (oneway) {
		BUG_ON(thread);
		if (node->has_async_transaction) {
			pending_async = true;
		} else {
			node->has_async_transaction = 1;
		}
	}

	binder_inner_proc_lock(proc);

	if (proc->is_dead || (thread && thread->is_dead)) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		return false;
	}

	if (!thread && !pending_async)
		thread = binder_select_thread_ilocked(proc);

	if (thread) {
		binder_transaction_priority(thread->task, t, node_prio,
					    node->inherit_rt);
		binder_enqueue_thread_work_ilocked(thread, &t->work);
	} else if (!pending_async) {
		binder_enqueue_work_ilocked(&t->work, &proc->todo);
	} else {
		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
	}

	if (!pending_async)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);

	return true;
}
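/*
 * Queueing policy implemented above, in order of preference:
 *
 *	1. an explicitly requested thread (used for replies and for
 *	   synchronous transactions pushed to a waiting thread),
 *	2. an idle thread picked by binder_select_thread_ilocked(),
 *	3. the proc-wide todo list,
 *	4. node->async_todo when an async transaction is already pending
 *	   on the node, which throttles one-way work to one in flight
 *	   per node.
 */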
/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:	struct binder_node for which to get refs
 * @procp:	returns @node->proc if valid
 * @error:	if no @procp then returns BR_DEAD_REPLY
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
 * target proc has died, @error is set to BR_DEAD_REPLY.
 */
static struct binder_node *binder_get_node_refs_for_txn(
		struct binder_node *node,
		struct binder_proc **procp,
		uint32_t *error)
{
	struct binder_node *target_node = NULL;

	binder_node_inner_lock(node);
	if (node->proc) {
		target_node = node;
		binder_inc_node_nilocked(node, 1, 0, NULL);
		binder_inc_node_tmpref_ilocked(node);
		node->proc->tmp_ref++;
		*procp = node->proc;
	} else
		*error = BR_DEAD_REPLY;
	binder_node_inner_unlock(node);

	return target_node;
}
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end, *off_start;
	binder_size_t off_min;
	u8 *sg_bufp, *sg_buf_end;
	struct binder_proc *target_proc = NULL;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error = 0;
	uint32_t return_error_param = 0;
	uint32_t return_error_line = 0;
	struct binder_buffer_object *last_fixup_obj = NULL;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	int t_debug_id = atomic_inc_return(&binder_last_id);
	char *secctx = NULL;
	u32 secctx_sz = 0;

	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	e->context_name = proc->context->name;

	if (reply) {
		binder_inner_proc_lock(proc);
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_inner_proc_unlock(proc);
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_empty_call_stack;
		}
		if (in_reply_to->to_thread != thread) {
			spin_lock(&in_reply_to->lock);
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			spin_unlock(&in_reply_to->lock);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		binder_inner_proc_unlock(proc);
		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			binder_inner_proc_unlock(target_thread->proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
		target_proc->tmp_ref++;
		binder_inner_proc_unlock(target_thread->proc);
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, tr->target.handle,
						     true);
			if (ref) {
				target_node = binder_get_node_refs_for_txn(
						ref->node, &target_proc,
						&return_error);
			} else {
				binder_user_error("%d:%d got transaction to invalid handle\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
			}
			binder_proc_unlock(proc);
		} else {
			mutex_lock(&context->context_mgr_node_lock);
			target_node = context->binder_context_mgr_node;
			if (target_node)
				target_node = binder_get_node_refs_for_txn(
						target_node, &target_proc,
						&return_error);
			else
				return_error = BR_DEAD_REPLY;
			mutex_unlock(&context->context_mgr_node_lock);
			if (target_node && target_proc == proc) {
				binder_user_error("%d:%d got transaction to context manager from process owning it\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_invalid_target_handle;
			}
		}
		if (!target_node) {
			/*
			 * return_error is set above
			 */
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		e->to_node = target_node->debug_id;
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPERM;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		binder_inner_proc_lock(proc);
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				spin_lock(&tmp->lock);
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				spin_unlock(&tmp->lock);
				binder_inner_proc_unlock(proc);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EPROTO;
				return_error_line = __LINE__;
				goto err_bad_call_stack;
			}
			while (tmp) {
				struct binder_thread *from;

				spin_lock(&tmp->lock);
				from = tmp->from;
				if (from && from->proc == target_proc) {
					atomic_inc(&from->tmp_ref);
					target_thread = from;
					spin_unlock(&tmp->lock);
					break;
				}
				spin_unlock(&tmp->lock);
				tmp = tmp->from_parent;
			}
		}
		binder_inner_proc_unlock(proc);
	}
	if (target_thread)
		e->to_thread = target_thread->pid;
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);
	spin_lock_init(&t->lock);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	if (!(t->flags & TF_ONE_WAY) &&
	    binder_supported_policy(current->policy)) {
		/* Inherit supported policies for synchronous transactions */
		t->priority.sched_policy = current->policy;
		t->priority.prio = current->normal_prio;
	} else {
		/* Otherwise, fall back to the default priority */
		t->priority = target_proc->default_priority;
	}

	if (target_node && target_node->txn_security_ctx) {
		u32 secid;
		size_t added_size;

		security_task_getsecid(proc->tsk, &secid);
		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
		if (ret) {
			return_error = BR_FAILED_REPLY;
			return_error_param = ret;
			return_error_line = __LINE__;
			goto err_get_secctx_failed;
		}
		added_size = ALIGN(secctx_sz, sizeof(u64));
		extra_buffers_size += added_size;
		if (extra_buffers_size < added_size) {
			/* integer overflow of extra_buffers_size */
			return_error = BR_FAILED_REPLY;
			return_error_param = EINVAL;
			return_error_line = __LINE__;
			goto err_bad_extra_size;
		}
	}

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
	if (IS_ERR(t->buffer)) {
		/*
		 * -ESRCH indicates VMA cleared. The target is dying.
		 */
		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	if (secctx) {
		size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
				    ALIGN(tr->offsets_size, sizeof(void *)) +
				    ALIGN(extra_buffers_size, sizeof(void *)) -
				    ALIGN(secctx_sz, sizeof(u64));
		char *kptr = t->buffer->data + buf_offset;

		t->security_ctx = (uintptr_t)kptr +
		    binder_alloc_get_user_buffer_offset(&target_proc->alloc);
		memcpy(kptr, secctx, secctx_sz);
		security_release_secctx(secctx, secctx_sz);
		secctx = NULL;
	}
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	off_start = (binder_size_t *)(t->buffer->data +
				      ALIGN(tr->data_size, sizeof(void *)));
	offp = off_start;

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	off_end = (void *)off_start + tr->offsets_size;
	sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
	sg_buf_end = sg_bufp + extra_buffers_size -
		ALIGN(secctx_sz, sizeof(u64));
	off_min = 0;
	for (; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(t->buffer, *offp);

		if (object_size == 0 || *offp < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid, (u64)*offp,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
		off_min = *offp + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			int target_fd = binder_translate_fd(fp->fd, t, thread,
							    in_reply_to);

			if (target_fd < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = target_fd;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			fp->pad_binder = 0;
			fp->fd = target_fd;
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			struct binder_buffer_object *parent =
				binder_validate_ptr(t->buffer, fda->parent,
						    off_start,
						    offp - off_start);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(t->buffer, off_start,
						   parent, fda->parent_offset,
						   last_fixup_obj,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(fda, parent, t, thread,
							in_reply_to);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = parent;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end - sg_bufp;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			if (copy_from_user(sg_bufp,
					   (const void __user *)(uintptr_t)
					   bp->buffer, bp->length)) {
				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
						  proc->pid, thread->pid);
				return_error_param = -EFAULT;
				return_error = BR_FAILED_REPLY;
				return_error_line = __LINE__;
				goto err_copy_data_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)sg_bufp +
				binder_alloc_get_user_buffer_offset(
						&target_proc->alloc);
			sg_bufp += ALIGN(bp->length, sizeof(u64));

			ret = binder_fixup_parent(t, thread, bp, off_start,
						  offp - off_start,
						  last_fixup_obj,
						  last_fixup_min_off);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = bp;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_object_type;
		}
	}
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	t->work.type = BINDER_WORK_TRANSACTION;

	if (reply) {
		binder_enqueue_thread_work(thread, tcomplete);
		binder_inner_proc_lock(target_proc);
		if (target_thread->is_dead) {
			binder_inner_proc_unlock(target_proc);
			goto err_dead_proc_or_thread;
		}
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction_ilocked(target_thread, in_reply_to);
		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
		binder_inner_proc_unlock(target_proc);
		wake_up_interruptible_sync(&target_thread->wait);
		binder_restore_priority(current, in_reply_to->saved_priority);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_inner_proc_lock(proc);
		/*
		 * Defer the TRANSACTION_COMPLETE, so we don't return to
		 * userspace immediately; this allows the target process to
		 * immediately start processing this transaction, reducing
		 * latency. We will then return the TRANSACTION_COMPLETE when
		 * the target replies (or there is an error).
		 */
		binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
		binder_inner_proc_unlock(proc);
		if (!binder_proc_transaction(t, target_proc, target_thread)) {
			binder_inner_proc_lock(proc);
			binder_pop_transaction_ilocked(thread, t);
			binder_inner_proc_unlock(proc);
			goto err_dead_proc_or_thread;
		}
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		binder_enqueue_thread_work(thread, tcomplete);
		if (!binder_proc_transaction(t, target_proc, NULL))
			goto err_dead_proc_or_thread;
	}
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	binder_proc_dec_tmpref(target_proc);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
	return;

err_dead_proc_or_thread:
	return_error = BR_DEAD_REPLY;
	return_error_line = __LINE__;
	binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, t->buffer, offp);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	target_node = NULL;
	t->buffer->transaction = NULL;
	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
err_bad_extra_size:
	if (secctx)
		security_release_secctx(secctx, secctx_sz);
err_get_secctx_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	if (target_proc)
		binder_proc_dec_tmpref(target_proc);
	if (target_node) {
		binder_dec_node(target_node, 1, 0);
		binder_dec_node_tmpref(target_node);
	}

	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
		     proc->pid, thread->pid, return_error, return_error_param,
		     (u64)tr->data_size, (u64)tr->offsets_size,
		     return_error_line);

	{
		struct binder_transaction_log_entry *fe;

		e->return_error = return_error;
		e->return_error_param = return_error_param;
		e->return_error_line = return_error_line;
		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
		smp_wmb();
		WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	}

	BUG_ON(thread->return_error.cmd != BR_OK);
	if (in_reply_to) {
		binder_restore_priority(current, in_reply_to->saved_priority);
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
		thread->return_error.cmd = return_error;
		binder_enqueue_thread_work(thread, &thread->return_error.work);
	}
}
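/*
 * Note on the error unwind above: the labels run in reverse order of
 * resource acquisition, so a failure at any point frees exactly what has
 * been set up so far (buffer objects released up to the failing offset
 * via binder_transaction_buffer_release(target_proc, t->buffer, offp),
 * then the buffer, the secctx, tcomplete, and finally t itself), before
 * the temporary thread/proc/node references are dropped and the failure
 * is recorded in binder_transaction_log_failed.
 */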
static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error.cmd == BR_OK) {
		int ret;

		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			const char *debug_string;
			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
			struct binder_ref_data rdata;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(uint32_t);
			ret = -1;
			if (increment && !target) {
				struct binder_node *ctx_mgr_node;
				mutex_lock(&context->context_mgr_node_lock);
				ctx_mgr_node = context->binder_context_mgr_node;
				if (ctx_mgr_node)
					ret = binder_inc_ref_for_node(
							proc, ctx_mgr_node,
							strong, NULL, &rdata);
				mutex_unlock(&context->context_mgr_node_lock);
			}
			if (ret)
				ret = binder_update_ref_for_handle(
						proc, target, increment, strong,
						&rdata);
			if (!ret && rdata.desc != target) {
				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
					proc->pid, thread->pid,
					target, rdata.desc);
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				break;
			case BC_RELEASE:
				debug_string = "Release";
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				break;
			}
			if (ret) {
				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
					proc->pid, thread->pid, debug_string,
					strong, target, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d\n",
				     proc->pid, thread->pid, debug_string,
				     rdata.debug_id, rdata.desc, rdata.strong,
				     rdata.weak);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;
			bool free_node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" :
					"BC_ACQUIRE_DONE",
					(u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					(u64)node_ptr, node->debug_id,
					(u64)cookie, (u64)node->cookie);
				binder_put_node(node);
				break;
			}
			binder_node_inner_lock(node);
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_weak_ref = 0;
			}
			free_node = binder_dec_node_nilocked(node,
					cmd == BC_ACQUIRE_DONE, 0);
			WARN_ON(free_node);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d tr %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs,
				     node->local_weak_refs, node->tmp_refs);
			binder_node_inner_unlock(node);
			binder_put_node(node);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_alloc_prepare_to_free(&proc->alloc,
							      data_ptr);
			if (IS_ERR_OR_NULL(buffer)) {
				if (PTR_ERR(buffer) == -EPERM) {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				} else {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				}
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");

			binder_inner_proc_lock(proc);
			if (buffer->transaction) {
				buffer->transaction->buffer = NULL;
				buffer->transaction = NULL;
			}
			binder_inner_proc_unlock(proc);
			if (buffer->async_transaction && buffer->target_node) {
				struct binder_node *buf_node;
				struct binder_work *w;

				buf_node = buffer->target_node;
				binder_node_inner_lock(buf_node);
				BUG_ON(!buf_node->has_async_transaction);
				BUG_ON(buf_node->proc != proc);
				w = binder_dequeue_work_head_ilocked(
						&buf_node->async_todo);
				if (!w) {
					buf_node->has_async_transaction = 0;
				} else {
					binder_enqueue_work_ilocked(
							w, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
				binder_node_inner_unlock(buf_node);
			}
			trace_binder_transaction_buffer_release(buffer);
			binder_transaction_buffer_release(proc, buffer, NULL);
			binder_alloc_free_buf(&proc->alloc, buffer);
			break;
		}

		case BC_TRANSACTION_SG:
		case BC_REPLY_SG: {
			struct binder_transaction_data_sg tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr.transaction_data,
					   cmd == BC_REPLY_SG, tr.buffers_size);
			break;
		}
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}

		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			binder_inner_proc_lock(proc);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			binder_inner_proc_unlock(proc);
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death = NULL;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/*
				 * Allocate memory for death notification
				 * before taking lock
				 */
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					WARN_ON(thread->return_error.cmd !=
						BR_OK);
					thread->return_error.cmd = BR_ERROR;
					binder_enqueue_thread_work(
						thread,
						&thread->return_error.work);
					binder_debug(
						BINDER_DEBUG_FAILED_TRANSACTION,
						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						proc->pid, thread->pid);
					break;
				}
			}
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				binder_proc_unlock(proc);
				kfree(death);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->data.debug_id,
				     ref->data.desc, ref->data.strong,
				     ref->data.weak, ref->node->debug_id);

			binder_node_lock(ref->node);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					kfree(death);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;

					binder_inner_proc_lock(proc);
					binder_enqueue_work_ilocked(
						&ref->death->work, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
					binder_inner_proc_unlock(proc);
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
						proc->pid, thread->pid,
						(u64)death->cookie,
						(u64)cookie);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				ref->death = NULL;
				binder_inner_proc_lock(proc);
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_thread_work_ilocked(
								thread,
								&death->work);
					else {
						binder_enqueue_work_ilocked(
								&death->work,
								&proc->todo);
						binder_wakeup_proc_ilocked(
								proc);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
				binder_inner_proc_unlock(proc);
			}
			binder_node_unlock(ref->node);
			binder_proc_unlock(proc);
		} break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			binder_inner_proc_lock(proc);
			list_for_each_entry(w, &proc->delivered_death,
					    entry) {
				struct binder_ref_death *tmp_death =
					container_of(w,
						     struct binder_ref_death,
						     work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
					proc->pid, thread->pid, (u64)cookie);
				binder_inner_proc_unlock(proc);
				break;
			}
			binder_dequeue_work_ilocked(&death->work);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper &
				    (BINDER_LOOPER_STATE_REGISTERED |
				     BINDER_LOOPER_STATE_ENTERED))
					binder_enqueue_thread_work_ilocked(
						thread, &death->work);
				else {
					binder_enqueue_work_ilocked(
							&death->work,
							&proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
			}
			binder_inner_proc_unlock(proc);
		} break;

		default:
			pr_err("%d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}
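/*
 * The BC_* commands parsed above reach the driver through the
 * BINDER_WRITE_READ ioctl. A minimal user-space sketch (illustrative
 * only; "fd" is assumed to be an open descriptor for a binder device):
 *
 *	uint32_t cmd = BC_ENTER_LOOPER;
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(cmd),
 *		.write_buffer = (binder_uintptr_t)&cmd,
 *	};
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 *
 * binder_thread_write() consumes the write buffer; the same ioctl then
 * fills the read buffer via binder_thread_read() below.
 */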
static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
	}
}
static int binder_put_node_cmd(struct binder_proc *proc,
			       struct binder_thread *thread,
			       void __user **ptrp,
			       binder_uintptr_t node_ptr,
			       binder_uintptr_t node_cookie,
			       int node_debug_id,
			       uint32_t cmd, const char *cmd_name)
{
	void __user *ptr = *ptrp;

	if (put_user(cmd, (uint32_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(uint32_t);

	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	binder_stat_br(proc, thread, cmd);
	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
		     proc->pid, thread->pid, cmd_name, node_debug_id,
		     (u64)node_ptr, (u64)node_cookie);

	*ptrp = ptr;
	return 0;
}
static int binder_wait_for_work(struct binder_thread *thread,
				bool do_proc_work)
{
	DEFINE_WAIT(wait);
	struct binder_proc *proc = thread->proc;
	int ret = 0;

	freezer_do_not_count();
	binder_inner_proc_lock(proc);
	for (;;) {
		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
		if (binder_has_work_ilocked(thread, do_proc_work))
			break;
		if (do_proc_work)
			list_add(&thread->waiting_thread_node,
				 &proc->waiting_threads);
		binder_inner_proc_unlock(proc);
		schedule();
		binder_inner_proc_lock(proc);
		list_del_init(&thread->waiting_thread_node);
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&thread->wait, &wait);
	binder_inner_proc_unlock(proc);
	freezer_count();

	return ret;
}

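/*
 * binder_thread_read() - fill the user-space read buffer with a stream
 * of BR_* return commands, mirroring the BC_* write format: each record
 * is a 32-bit return code followed by its fixed-size payload. *consumed
 * is updated to the number of bytes actually written.
 */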
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_restore_priority(current, proc->default_priority);
	}

	if (non_block) {
		if (!binder_has_work(thread, wait_for_proc_work))
			ret = -EAGAIN;
	} else {
		ret = binder_wait_for_work(thread, wait_for_proc_work);
	}

	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data_secctx tr;
		struct binder_transaction_data *trd = &tr.transaction_data;
		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;
		size_t trsize = sizeof(*trd);

		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			   wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);
		if (binder_worklist_empty_ilocked(&thread->todo))
			thread->process_todo = false;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			WARN_ON(e->cmd == BR_OK);
			binder_inner_proc_unlock(proc);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			cmd = e->cmd;
			e->cmd = BR_OK;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_inner_proc_unlock(proc);
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			int strong, weak;
			binder_uintptr_t node_ptr = node->ptr;
			binder_uintptr_t node_cookie = node->cookie;
			int node_debug_id = node->debug_id;
			int has_weak_ref;
			int has_strong_ref;
			void __user *orig_ptr = ptr;

			BUG_ON(proc != node->proc);
			strong = node->internal_strong_refs ||
					node->local_strong_refs;
			weak = !hlist_empty(&node->refs) ||
					node->local_weak_refs ||
					node->tmp_refs || strong;
			has_strong_ref = node->has_strong_ref;
			has_weak_ref = node->has_weak_ref;

			if (weak && !has_weak_ref) {
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			}
			if (strong && !has_strong_ref) {
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			}
			if (!strong && has_strong_ref)
				node->has_strong_ref = 0;
			if (!weak && has_weak_ref)
				node->has_weak_ref = 0;
			if (!weak && !strong) {
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx deleted\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_inner_proc_unlock(proc);
				binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
				binder_node_unlock(node);
				binder_free_node(node);
			} else
				binder_inner_proc_unlock(proc);

			if (weak && !has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_INCREFS, "BR_INCREFS");
			if (!ret && strong && !has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_ACQUIRE, "BR_ACQUIRE");
			if (!ret && !strong && has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_RELEASE, "BR_RELEASE");
			if (!ret && !weak && has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_DECREFS, "BR_DECREFS");
			if (orig_ptr == ptr)
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
			if (ret)
				return ret;
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			cookie = death->cookie;

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)cookie);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				binder_inner_proc_unlock(proc);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;
			struct binder_priority node_prio;

			trd->target.ptr = target_node->ptr;
			trd->cookie = target_node->cookie;
			node_prio.sched_policy = target_node->sched_policy;
			node_prio.prio = target_node->min_priority;
			binder_transaction_priority(current, t, node_prio,
						    target_node->inherit_rt);
			cmd = BR_TRANSACTION;
		} else {
			trd->target.ptr = 0;
			trd->cookie = 0;
			cmd = BR_REPLY;
		}
		trd->code = t->code;
		trd->flags = t->flags;
		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			trd->sender_pid =
				task_tgid_nr_ns(sender,
						task_active_pid_ns(current));
		} else {
			trd->sender_pid = 0;
		}

		trd->data_size = t->buffer->data_size;
		trd->offsets_size = t->buffer->offsets_size;
		trd->data.ptr.buffer = (binder_uintptr_t)
			((uintptr_t)t->buffer->data +
			binder_alloc_get_user_buffer_offset(&proc->alloc));
		trd->data.ptr.offsets = trd->data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));

		tr.secctx = t->security_ctx;
		if (t->security_ctx) {
			cmd = BR_TRANSACTION_SEC_CTX;
			trsize = sizeof(tr);
		}
		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "put_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, trsize)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "copy_to_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += trsize;

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     (cmd == BR_TRANSACTION_SEC_CTX) ?
				     "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)trd->data.ptr.buffer,
			     (u64)trd->data.ptr.offsets);

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	binder_inner_proc_lock(proc);
	if (proc->requested_threads == 0 &&
	    list_empty(&thread->proc->waiting_threads) &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to
					    * spawn a new thread if we leave
					    * this out */) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}

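/*
 * binder_release_work() - drain a work list when a thread or process
 * dies: undelivered transactions are failed with BR_DEAD_REPLY and the
 * work items nobody will ever read are freed.
 */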
static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;

	while (1) {
		w = binder_dequeue_work_head(proc, list);
		if (!w)
			return;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);

			binder_cleanup_transaction(t, "process died.",
						   BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_ERROR: %u\n",
				e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}
}

static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	get_task_struct(current);
	thread->task = current;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}

static void binder_free_proc(struct binder_proc *proc)
{
	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}

static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	put_task_struct(thread->task);
	kfree(thread);
}

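/*
 * binder_thread_release() - tear down a thread on BINDER_THREAD_EXIT or
 * process release: unlink it from proc->threads, walk its transaction
 * stack to sever in-flight transactions, send BR_DEAD_REPLY where a
 * reply was still expected, and flush its todo list. Returns the number
 * of transactions that were still active.
 */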
static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
	}

	/*
	 * If this thread used poll, make sure we remove the waitqueue
	 * from any epoll data structures holding it with POLLFREE.
	 * waitqueue_active() is safe to use here because we're holding
	 * the inner lock.
	 */
	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
	    waitqueue_active(&thread->wait)) {
		wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
	}

	binder_inner_proc_unlock(thread->proc);

	/*
	 * This is needed to avoid races between wake_up_poll() above and
	 * ep_remove_waitqueue() called for other reasons (eg the epoll file
	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
	 * lock, so we can be sure it's done after calling synchronize_rcu().
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		synchronize_rcu();

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}

static unsigned int binder_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	bool wait_for_proc_work;

	thread = binder_get_thread(proc);
	if (!thread)
		return POLLERR;

	binder_inner_proc_lock(thread->proc);
	thread->looper |= BINDER_LOOPER_STATE_POLL;
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);

	binder_inner_proc_unlock(thread->proc);

	poll_wait(filp, &thread->wait, wait);

	if (binder_has_work(thread, wait_for_proc_work))
		return POLLIN;

	return 0;
}

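/*
 * binder_ioctl_write_read() services BINDER_WRITE_READ, the main data
 * path of the driver. A minimal user-space sketch (illustrative only;
 * error handling and the command payloads are elided, and real clients
 * go through libbinder's IPCThreadState):
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)out_buf,
 *		.write_size = out_len,
 *		.read_buffer = (binder_uintptr_t)in_buf,
 *		.read_size = sizeof(in_buf),
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *	// bwr.write_consumed and bwr.read_consumed report how much of
 *	// each buffer the driver actually processed or filled.
 */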
static int binder_ioctl_write_read(struct file *filp,
				   unsigned int cmd, unsigned long arg,
				   struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}

static int binder_ioctl_set_ctx_mgr(struct file *filp,
				    struct flat_binder_object *fbo)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, fbo);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}

static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
		struct binder_node_info_for_ref *info)
{
	struct binder_node *node;
	struct binder_context *context = proc->context;
	__u32 handle = info->handle;

	if (info->strong_count || info->weak_count || info->reserved1 ||
	    info->reserved2 || info->reserved3) {
		binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
				  proc->pid);
		return -EINVAL;
	}

	/* This ioctl may only be used by the context manager */
	mutex_lock(&context->context_mgr_node_lock);
	if (!context->binder_context_mgr_node ||
	    context->binder_context_mgr_node->proc != proc) {
		mutex_unlock(&context->context_mgr_node_lock);
		return -EPERM;
	}
	mutex_unlock(&context->context_mgr_node_lock);

	node = binder_get_node_from_ref(proc, handle, true, NULL);
	if (!node)
		return -EINVAL;

	info->strong_count = node->local_strong_refs +
		node->internal_strong_refs;
	info->weak_count = node->local_weak_refs;

	binder_put_node(node);

	return 0;
}

static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
				struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;

	memset(info, 0, sizeof(*info));

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
			info->ptr = node->ptr;
			info->cookie = node->cookie;
			info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}

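/*
 * binder_ioctl() - top-level command dispatcher for the binder
 * character device. BINDER_WRITE_READ carries the actual IPC traffic;
 * the remaining commands are one-shot setup (max threads, context
 * manager, thread exit) and debug queries.
 */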
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		int max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR_EXT: {
		struct flat_binder_object fbo;

		if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
			ret = -EINVAL;
			goto err;
		}
		ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
		if (ret)
			goto err;
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp, NULL);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_INFO_FOR_REF: {
		struct binder_node_info_for_ref info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_info_for_ref(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_DEBUG_INFO: {
		struct binder_node_debug_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}

static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}

static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};

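/*
 * binder_mmap() backs the read-only buffer into which transaction data
 * is delivered. A user-space sketch (illustrative only; libbinder maps
 * roughly 1 MB, and the driver below caps the mapping at 4 MB):
 *
 *	void *map = mmap(NULL, vm_size, PROT_READ,
 *			 MAP_PRIVATE | MAP_NORESERVE, binder_fd, 0);
 *
 * Writable or copy-on-fork mappings are refused via
 * FORBIDDEN_MMAP_FLAGS and the VM_DONTCOPY / ~VM_MAYWRITE adjustment.
 */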
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
	if (ret)
		return ret;
	mutex_lock(&proc->files_lock);
	proc->files = get_files_struct(current);
	mutex_unlock(&proc->files_lock);
	return 0;

err_bad_arg:
	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}

static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;
	struct binder_device *binder_dev;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	mutex_init(&proc->files_lock);
	INIT_LIST_HEAD(&proc->todo);
	if (binder_supported_policy(current->policy)) {
		proc->default_priority.sched_policy = current->policy;
		proc->default_priority.prio = current->normal_prio;
	} else {
		proc->default_priority.sched_policy = SCHED_NORMAL;
		proc->default_priority.prio = NICE_TO_PRIO(0);
	}

	binder_dev = container_of(filp->private_data, struct binder_device,
				  miscdev);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	INIT_LIST_HEAD(&proc->waiting_threads);
	filp->private_data = proc;

	mutex_lock(&binder_procs_lock);
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * anyway print all contexts that a given PID has, so this
		 * is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&binder_proc_fops);
	}

	return 0;
}

static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

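/*
 * binder_deferred_flush() - wake every thread of the process with
 * looper_need_return set so that blocked BINDER_WRITE_READ calls
 * return to user space; runs from the deferred workqueue on flush().
 */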
static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}

static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}

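/*
 * binder_node_release() - called for each node of a dying process.
 * Nodes that still have remote references are moved onto the global
 * binder_dead_nodes list, and a BINDER_WORK_DEAD_BINDER item is queued
 * to every proc whose ref requested a death notification.
 */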
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node,
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
		 * with new notification requests and the
		 * inner lock to synchronize with queued
		 * death notifications.
		 */
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;

		BUG_ON(!list_empty(&ref->death->work.entry));
		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);
	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}

static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	BUG_ON(proc->files);

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}

static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			mutex_lock(&proc->files_lock);
			files = proc->files;
			if (files)
				proc->files = NULL;
			mutex_unlock(&proc->files_lock);
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		if (files)
			put_files_struct(files);
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

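/*
 * binder_defer_work() - queue deferred work (flush, release, put-files)
 * for a proc. Work bits accumulate in proc->deferred_work under
 * binder_deferred_lock and are drained one proc at a time by
 * binder_deferred_func() above on the binder workqueue.
 */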
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
				&binder_deferred_list);
		queue_work(binder_deferred_workqueue, &binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}

static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;

	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority.sched_policy,
		   t->priority.prio, t->need_reply);
	spin_unlock(&t->lock);

	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}

	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %p\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->data);
}

static void print_binder_work_ilocked(struct seq_file *m,
				      struct binder_proc *proc,
				      const char *prefix,
				      const char *transaction_prefix,
				      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}

static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->sched_policy, node->min_priority,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					"    pending async transaction", w);
	}
}

static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}

static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							     struct binder_ref,
							     rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}

static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};

static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}

static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}

static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}

static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

= {
5892 .owner
= THIS_MODULE
,
5893 .poll
= binder_poll
,
5894 .unlocked_ioctl
= binder_ioctl
,
5895 .compat_ioctl
= binder_ioctl
,
5896 .mmap
= binder_mmap
,
5897 .open
= binder_open
,
5898 .flush
= binder_flush
,
5899 .release
= binder_release
,
5902 BINDER_DEBUG_ENTRY(state
);
5903 BINDER_DEBUG_ENTRY(stats
);
5904 BINDER_DEBUG_ENTRY(transactions
);
5905 BINDER_DEBUG_ENTRY(transaction_log
);
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}

static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_names;
	struct binder_device *device;
	struct hlist_node *tmp;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);
	binder_deferred_workqueue = create_singlethread_workqueue("binder");
	if (!binder_deferred_workqueue)
		return -ENOMEM;

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	/*
	 * Copy the module_parameter string, because we don't want to
	 * tokenize it in-place.
	 */
	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}
	strcpy(device_names, binder_devices_param);

	while ((device_name = strsep(&device_names, ","))) {
		ret = init_binder_device(device_name);
		if (ret)
			goto err_init_binder_device_failed;
	}

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}
err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	destroy_workqueue(binder_deferred_workqueue);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");