/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires node->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 */
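/*
 * A minimal usage sketch (not part of the driver) of the lock order
 * described above; proc and node here are hypothetical locals:
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_node_lock(node);		// 2) node->lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */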
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include <uapi/linux/sched/types.h>
#include "binder_alloc.h"
#include "binder_trace.h"
static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
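/*
 * For reference, BINDER_DEBUG_ENTRY(proc) above expands to a
 * binder_proc_open() wrapper around binder_proc_show() plus a
 * binder_proc_fops table. A debugfs registration (done elsewhere in
 * the driver; the arguments shown here are a hypothetical sketch)
 * would look like:
 *
 *	debugfs_create_file("proc", 0444, binder_debugfs_dir_entry_root,
 *			    NULL, &binder_proc_fops);
 */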
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);
static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}
struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};
struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;
static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
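/*
 * The transaction log is a fixed 32-entry ring: each call takes the
 * next value of log->cur and writes slot (cur % 32), so once 32
 * entries have been added the ring wraps and the oldest record is
 * overwritten (cur == 37 reuses slot 37 % 32 == 5, for example).
 * Clearing e->debug_id_done before the memset(), with the write
 * barrier in between, lets lock-free readers detect an entry that is
 * still being filled in.
 */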
struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;

	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};
/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};
struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};
/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @sched_policy:         minimum scheduling policy for node
 *                        (invariant after initialized)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @inherit_rt:           inherit RT scheduling policy from caller
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 sched_policy:2;
		u8 inherit_rt:1;
		u8 accept_fds:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};
struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};
/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};
/**
 * struct binder_ref - struct to track references on nodes
 * @data:        binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:  list entry for node->refs list in target node
 *               (protected by @node->lock)
 * @proc:        binder_proc containing ref
 * @node:        binder_node of target node. When cleaning up a
 *               ref for deletion in binder_cleanup_ref, a non-NULL
 *               @node indicates the node must be freed
 * @death:       pointer to death notification (ref_death) if requested
 *               (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};
enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};
/**
 * struct binder_priority - scheduler policy and priority
 * @sched_policy:    scheduler policy
 * @prio:            [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
 *
 * The binder driver supports inheriting the following scheduler policies:
 * SCHED_NORMAL
 * SCHED_BATCH
 * SCHED_FIFO
 * SCHED_RR
 */
struct binder_priority {
	unsigned int sched_policy;
	int prio;
};
/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @wait:                 wait queue head to wait for proc work
 *                        (invariant after initialized)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	struct binder_priority default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};
enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};
/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 * @task:                 struct task_struct for this thread
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
	struct task_struct *task;
};
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	struct binder_priority priority;
	struct binder_priority saved_priority;
	bool set_priority_called;
	kuid_t sender_euid;
	/**
	 * @lock: protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}
/**
 * binder_proc_unlock() - Release spinlock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}
/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}
/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}
/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}
/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}
/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, it also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
}
/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	spin_unlock(&node->lock);
}
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}
/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}
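/*
 * binder_worklist_empty() is the canonical shape of the *_ilocked()
 * convention from the header comment: the suffixed helper assumes
 * proc->inner_lock is held, while the unsuffixed wrapper brackets it
 * with the lock. A caller already holding the inner lock uses the
 * _ilocked() variant directly:
 *
 *	binder_inner_proc_lock(proc);
 *	if (binder_worklist_empty_ilocked(&proc->todo))
 *		...
 *	binder_inner_proc_unlock(proc);
 */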
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}
/**
 * binder_enqueue_work() - Add an item to the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 */
static void
binder_enqueue_work(struct binder_proc *proc,
		    struct binder_work *work,
		    struct list_head *target_list)
{
	binder_inner_proc_lock(proc);
	binder_enqueue_work_ilocked(work, target_list);
	binder_inner_proc_unlock(proc);
}
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}
/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}
static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}
/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}
/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}
/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}
static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return !binder_worklist_empty_ilocked(&thread->todo) ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}
static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}
static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}
/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}
/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}
static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
static bool is_rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static bool is_fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static bool binder_supported_policy(int policy)
{
	return is_fair_policy(policy) || is_rt_policy(policy);
}
static int to_userspace_prio(int policy, int kernel_priority)
{
	if (is_fair_policy(policy))
		return PRIO_TO_NICE(kernel_priority);
	else
		return MAX_USER_RT_PRIO - 1 - kernel_priority;
}

static int to_kernel_prio(int policy, int user_priority)
{
	if (is_fair_policy(policy))
		return NICE_TO_PRIO(user_priority);
	else
		return MAX_USER_RT_PRIO - 1 - user_priority;
}
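/*
 * Worked example of the two mappings above (standard kernel priority
 * layout, MAX_USER_RT_PRIO == 100):
 *
 *	to_kernel_prio(SCHED_NORMAL, 0)  == NICE_TO_PRIO(0)  == 120
 *	to_kernel_prio(SCHED_NORMAL, 19) == NICE_TO_PRIO(19) == 139
 *	to_kernel_prio(SCHED_FIFO, 99)   == 100 - 1 - 99     == 0
 *	to_userspace_prio(SCHED_FIFO, 0) == 100 - 1 - 0      == 99
 *
 * Lower kernel values mean higher effective priority, matching the
 * [0..99] RT / [100..139] SCHED_NORMAL ranges documented on
 * struct binder_priority.
 */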
static void binder_do_set_priority(struct task_struct *task,
				   struct binder_priority desired,
				   bool verify)
{
	int priority; /* user-space prio value */
	bool has_cap_nice;
	unsigned int policy = desired.sched_policy;

	if (task->policy == policy && task->normal_prio == desired.prio)
		return;

	has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);

	priority = to_userspace_prio(policy, desired.prio);

	if (verify && is_rt_policy(policy) && !has_cap_nice) {
		long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);

		if (max_rtprio == 0) {
			policy = SCHED_NORMAL;
			priority = MIN_NICE;
		} else if (priority > max_rtprio) {
			priority = max_rtprio;
		}
	}

	if (verify && is_fair_policy(policy) && !has_cap_nice) {
		long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));

		if (min_nice > MAX_NICE) {
			binder_user_error("%d RLIMIT_NICE not set\n",
					  task->pid);
			return;
		} else if (priority < min_nice) {
			priority = min_nice;
		}
	}

	if (policy != desired.sched_policy ||
	    to_kernel_prio(policy, priority) != desired.prio)
		binder_debug(BINDER_DEBUG_PRIORITY_CAP,
			     "%d: priority %d not allowed, using %d instead\n",
			      task->pid, desired.prio,
			      to_kernel_prio(policy, priority));

	trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
				  to_kernel_prio(policy, priority),
				  desired.prio);

	/* Set the actual priority */
	if (task->policy != policy || is_rt_policy(policy)) {
		struct sched_param params;

		params.sched_priority = is_rt_policy(policy) ? priority : 0;

		sched_setscheduler_nocheck(task,
					   policy | SCHED_RESET_ON_FORK,
					   &params);
	}
	if (is_fair_policy(policy))
		set_user_nice(task, priority);
}
static void binder_set_priority(struct task_struct *task,
				struct binder_priority desired)
{
	binder_do_set_priority(task, desired, /* verify = */ true);
}

static void binder_restore_priority(struct task_struct *task,
				    struct binder_priority desired)
{
	binder_do_set_priority(task, desired, /* verify = */ false);
}
static void binder_transaction_priority(struct task_struct *task,
					struct binder_transaction *t,
					struct binder_priority node_prio,
					bool inherit_rt)
{
	struct binder_priority desired_prio;

	if (t->set_priority_called)
		return;

	t->set_priority_called = true;
	t->saved_priority.sched_policy = task->policy;
	t->saved_priority.prio = task->normal_prio;

	if (!inherit_rt && is_rt_policy(t->priority.sched_policy)) {
		desired_prio.prio = NICE_TO_PRIO(0);
		desired_prio.sched_policy = SCHED_NORMAL;
	} else {
		desired_prio.prio = t->priority.prio;
		desired_prio.sched_policy = t->priority.sched_policy;
	}

	if (node_prio.prio < t->priority.prio ||
	    (node_prio.prio == t->priority.prio &&
	     node_prio.sched_policy == SCHED_FIFO)) {
		/*
		 * In case the minimum priority on the node is
		 * higher (lower value), use that priority. If
		 * the priority is the same, but the node uses
		 * SCHED_FIFO, prefer SCHED_FIFO, since it can
		 * run unbounded, unlike SCHED_RR.
		 */
		desired_prio = node_prio;
	}

	binder_set_priority(task, desired_prio);
}
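/*
 * Worked example (hypothetical values): a transaction carrying
 * t->priority = {SCHED_NORMAL, prio 120} (nice 0) arrives at a node
 * whose minimum is node_prio = {SCHED_NORMAL, prio 110} (nice -10).
 * Since 110 < 120, the node minimum wins and the target task runs at
 * prio 110 for the span of the transaction, while saved_priority keeps
 * the task's original policy/prio for the later restore via
 * binder_restore_priority().
 */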
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}
static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}
static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;
	s8 priority;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
		FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
	node->min_priority = to_kernel_prio(node->sched_policy, priority);
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}
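/*
 * Worked example of the flag decoding above, assuming the Android uapi
 * layout (low byte = priority, bit 8 = FLAT_BINDER_FLAG_ACCEPTS_FDS,
 * bits 9-10 = scheduling policy):
 *
 *	fp->flags = (SCHED_BATCH << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT)
 *		  | FLAT_BINDER_FLAG_ACCEPTS_FDS | 10;
 *
 * yields sched_policy == SCHED_BATCH, accept_fds == 1 and
 * min_priority == to_kernel_prio(SCHED_BATCH, 10) == NICE_TO_PRIO(10)
 * == 130.
 */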
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}
static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			binder_dequeue_work_ilocked(&node->work);
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}
static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
				!hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}
static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}
static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}
/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}
/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}
static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}
/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		      node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
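/*
 * Worked example of the descriptor allocation above: if this proc
 * already holds descs {0, 1, 2, 5}, the ordered scan of refs_by_desc
 * advances the candidate to 1, 2, then 3, and stops at the gap before
 * 5, so the new ref gets desc == 3. Only a ref to the context manager
 * node may receive the reserved desc 0.
 */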
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->data.debug_id,
			      ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}
/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}
/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}
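/*
 * A minimal lifecycle sketch (hypothetical caller) of the olocked
 * inc/dec pair above:
 *
 *	binder_proc_lock(proc);
 *	ret = binder_inc_ref_olocked(ref, 1, NULL);	// strong 0 -> 1
 *	...
 *	delete = binder_dec_ref_olocked(ref, 1);	// strong 1 -> 0
 *	binder_proc_unlock(proc);
 *	if (delete)
 *		binder_free_ref(ref);	// both counts reached zero
 *
 * binder_update_ref_for_handle() below follows exactly this pattern,
 * deferring the free until after the proc lock is dropped.
 */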
/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}
/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}
/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}
/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}
/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}
/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}
/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from)
		return NULL;
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
	return NULL;
}
static void binder_free_transaction(struct binder_transaction *t)
{
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				      t->debug_id,
				      target_thread->proc->pid,
				      target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_work_ilocked(
					&target_thread->reply_error.work,
					&target_thread->todo);
				wake_up_interruptible(&target_thread->wait);
			} else {
				WARN(1, "Unexpected reply error: %u\n",
				     target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			      t->debug_id);
	}
}
/**
 * binder_validate_object() - checks for a valid metadata object in a buffer.
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the buffer at which to validate an object.
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero.
 */
static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
{
	/* Check if we can read a header first */
	struct binder_object_header *hdr;
	size_t object_size = 0;

	if (offset > buffer->data_size - sizeof(*hdr) ||
	    buffer->data_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	/* Ok, now see if we can read a complete object. */
	hdr = (struct binder_object_header *)(buffer->data + offset);
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
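/*
 * Illustrative caller loop (sketch): validation is driven from the
 * offsets array of a transaction buffer, rejecting the buffer as soon as
 * one entry does not describe a well-formed object:
 *
 *	for (offp = off_start; offp < off_end; offp++) {
 *		size_t object_size = binder_validate_object(buffer, *offp);
 *
 *		if (object_size == 0)
 *			... fail the transaction ...
 *	}
 */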
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b:		binder_buffer containing the object
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start:	points to the start of the offset array
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 */
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
							binder_size_t index,
							binder_size_t *start,
							binder_size_t num_valid)
{
	struct binder_buffer_object *buffer_obj;
	binder_size_t *offp;

	if (index >= num_valid)
		return NULL;

	offp = start + index;
	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
		return NULL;

	return buffer_obj;
}
/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b:			transaction buffer
 * @objects_start:	start of objects buffer
 * @buffer:		binder_buffer_object in which to fix up
 * @offset:		start offset in @buffer to fix up
 * @last_obj:		last binder_buffer_object that we fixed up in
 * @last_min_offset:	minimum fixup offset in @last_obj
 *
 * Return:		%true if a fixup in buffer @buffer at offset @offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_buffer *b,
				  binder_size_t *objects_start,
				  struct binder_buffer_object *buffer,
				  binder_size_t fixup_offset,
				  struct binder_buffer_object *last_obj,
				  binder_size_t last_min_offset)
{
	if (!last_obj) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj != buffer) {
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
		last_obj = (struct binder_buffer_object *)
			(b->data + *(objects_start + last_obj->parent));
	}
	return (fixup_offset >= last_min_offset);
}
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_start, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start = (binder_size_t *)(buffer->data +
				      ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)off_start + buffer->offsets_size;
	for (offp = off_start; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(buffer, *offp);

		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		hdr = (struct binder_object_header *)(buffer->data + *offp);
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);

			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->fd);
			if (failed_at)
				task_close_fd(proc, fp->fd);
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			uintptr_t parent_buffer;
			u32 *fd_array;
			size_t fd_index;
			binder_size_t fd_buf_size;

			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(buffer, fda->parent,
						     off_start,
						     offp - off_start);
			if (!parent) {
				pr_err("transaction release %d bad parent offset",
				       debug_id);
				continue;
			}
			/*
			 * Since the parent was already fixed up, convert it
			 * back to kernel address space to access it
			 */
			parent_buffer = parent->buffer -
				binder_alloc_get_user_buffer_offset(
						&proc->alloc);

			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
				task_close_fd(proc, fd_array[fd_index]);
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}
static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}
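/*
 * Translation summary (illustrative): a BINDER_TYPE_BINDER object sent
 * by its owning process arrives in the target as a BINDER_TYPE_HANDLE
 * whose descriptor is only meaningful inside the target process:
 *
 *	sender: { .hdr.type = BINDER_TYPE_BINDER, .binder = ptr, .cookie = c }
 *	target: { .hdr.type = BINDER_TYPE_HANDLE, .handle = rdata.desc }
 */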
static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}
static int binder_translate_fd(int fd,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	int target_fd;
	struct file *file;
	int ret;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
	if (target_fd < 0) {
		ret = -ENOMEM;
		goto err_get_unused_fd;
	}
	task_fd_install(target_proc, target_fd, file);
	trace_binder_transaction_fd(t, fd, target_fd);
	binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
		     fd, target_fd);

	return target_fd;

err_get_unused_fd:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}
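/*
 * Resulting fd mapping (sketch with made-up descriptor numbers): the
 * sender's descriptor is duplicated into the target's fd table, so one
 * struct file becomes reachable under two independent descriptors:
 *
 *	sender:	fd 17 --> struct file F
 *	target:	fd  5 --> struct file F	(value returned above)
 */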
static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size, num_installed_fds;
	int target_fd;
	uintptr_t parent_buffer;
	u32 *fd_array;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * Since the parent was already fixed up, convert it
	 * back to the kernel address space to access it
	 */
	parent_buffer = parent->buffer -
		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
	fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
						in_reply_to);
		if (target_fd < 0)
			goto err_translate_fd_failed;
		fd_array[fdi] = target_fd;
	}
	return 0;

err_translate_fd_failed:
	/*
	 * Failed to allocate fd or security error, free fds
	 * installed so far.
	 */
	num_installed_fds = fdi;
	for (fdi = 0; fdi < num_installed_fds; fdi++)
		task_close_fd(target_proc, fd_array[fdi]);
	return target_fd;
}
static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t *off_start,
			       binder_size_t num_valid,
			       struct binder_buffer_object *last_fixup_obj,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	u8 *parent_buffer;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(b, off_start,
				   parent, bp->parent_offset,
				   last_fixup_obj,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	parent_buffer = (u8 *)((uintptr_t)parent->buffer -
			binder_alloc_get_user_buffer_offset(
				&target_proc->alloc));
	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;

	return 0;
}
/**
 * binder_proc_transaction() - sends a transaction to a process and wakes it up
 * @t:		transaction to send
 * @proc:	process to send the transaction to
 * @thread:	thread in @proc to send the transaction to (may be NULL)
 *
 * This function queues a transaction to the specified process. It will try
 * to find a thread in the target process to handle the transaction and
 * wake it up. If no thread is found, the work is queued to the proc
 * waitqueue.
 *
 * If the @thread parameter is not NULL, the transaction is always queued
 * to the waitlist of that specific thread.
 *
 * Return:	true if the transaction was successfully queued
 *		false if the target process or thread is dead
 */
static bool binder_proc_transaction(struct binder_transaction *t,
				    struct binder_proc *proc,
				    struct binder_thread *thread)
{
	struct list_head *target_list = NULL;
	struct binder_node *node = t->buffer->target_node;
	struct binder_priority node_prio;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool wakeup = true;

	BUG_ON(!node);
	binder_node_lock(node);
	node_prio.prio = node->min_priority;
	node_prio.sched_policy = node->sched_policy;

	if (oneway) {
		BUG_ON(thread);
		if (node->has_async_transaction) {
			target_list = &node->async_todo;
			wakeup = false;
		} else {
			node->has_async_transaction = 1;
		}
	}

	binder_inner_proc_lock(proc);

	if (proc->is_dead || (thread && thread->is_dead)) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		return false;
	}

	if (!thread && !target_list)
		thread = binder_select_thread_ilocked(proc);

	if (thread) {
		target_list = &thread->todo;
		binder_transaction_priority(thread->task, t, node_prio,
					    node->inherit_rt);
	} else if (!target_list) {
		target_list = &proc->todo;
	} else {
		BUG_ON(target_list != &node->async_todo);
	}

	binder_enqueue_work_ilocked(&t->work, target_list);

	if (wakeup)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);

	return true;
}
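/*
 * Caller sketch (illustrative): binder_transaction() delivers the
 * finished work item this way and treats a false return as a dead
 * target:
 *
 *	if (!binder_proc_transaction(t, target_proc, target_thread))
 *		goto err_dead_proc_or_thread;
 */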
/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:	struct binder_node for which to get refs
 * @procp:	returns @node->proc if valid
 * @error:	if no @procp then returns BR_DEAD_REPLY
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken or NULL if @node->proc is NULL.
 * Also sets @procp if valid. If the @node->proc is NULL indicating that the
 * target proc has died, @error is set to BR_DEAD_REPLY.
 */
static struct binder_node *binder_get_node_refs_for_txn(
		struct binder_node *node,
		struct binder_proc **procp,
		uint32_t *error)
{
	struct binder_node *target_node = NULL;

	binder_node_inner_lock(node);
	if (node->proc) {
		target_node = node;
		binder_inc_node_nilocked(node, 1, 0, NULL);
		binder_inc_node_tmpref_ilocked(node);
		node->proc->tmp_ref++;
		*procp = node->proc;
	} else
		*error = BR_DEAD_REPLY;
	binder_node_inner_unlock(node);

	return target_node;
}
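/*
 * Caller sketch (illustrative): the refs taken here are dropped once the
 * transaction has been queued, or on the error paths, e.g.:
 *
 *	target_node = binder_get_node_refs_for_txn(ref->node, &target_proc,
 *						   &return_error);
 *	...
 *	binder_dec_node_tmpref(target_node);
 *	binder_proc_dec_tmpref(target_proc);
 */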
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end, *off_start;
	binder_size_t off_min;
	u8 *sg_bufp, *sg_buf_end;
	struct binder_proc *target_proc = NULL;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error = 0;
	uint32_t return_error_param = 0;
	uint32_t return_error_line = 0;
	struct binder_buffer_object *last_fixup_obj = NULL;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	int t_debug_id = atomic_inc_return(&binder_last_id);

	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	e->context_name = proc->context->name;

	if (reply) {
		binder_inner_proc_lock(proc);
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_inner_proc_unlock(proc);
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_empty_call_stack;
		}
		if (in_reply_to->to_thread != thread) {
			spin_lock(&in_reply_to->lock);
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			spin_unlock(&in_reply_to->lock);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		binder_inner_proc_unlock(proc);
		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			binder_inner_proc_unlock(target_thread->proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
		target_proc->tmp_ref++;
		binder_inner_proc_unlock(target_thread->proc);
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, tr->target.handle,
						     true);
			if (ref) {
				target_node = binder_get_node_refs_for_txn(
						ref->node, &target_proc,
						&return_error);
			} else {
				binder_user_error("%d:%d got transaction to invalid handle\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
			}
			binder_proc_unlock(proc);
		} else {
			mutex_lock(&context->context_mgr_node_lock);
			target_node = context->binder_context_mgr_node;
			if (target_node)
				target_node = binder_get_node_refs_for_txn(
						target_node, &target_proc,
						&return_error);
			else
				return_error = BR_DEAD_REPLY;
			mutex_unlock(&context->context_mgr_node_lock);
		}
		if (!target_node) {
			/*
			 * return_error is set above
			 */
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		e->to_node = target_node->debug_id;
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPERM;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		binder_inner_proc_lock(proc);
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				spin_lock(&tmp->lock);
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				spin_unlock(&tmp->lock);
				binder_inner_proc_unlock(proc);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EPROTO;
				return_error_line = __LINE__;
				goto err_bad_call_stack;
			}
			while (tmp) {
				struct binder_thread *from;

				spin_lock(&tmp->lock);
				from = tmp->from;
				if (from && from->proc == target_proc) {
					atomic_inc(&from->tmp_ref);
					target_thread = from;
					spin_unlock(&tmp->lock);
					break;
				}
				spin_unlock(&tmp->lock);
				tmp = tmp->from_parent;
			}
		}
		binder_inner_proc_unlock(proc);
	}
	if (target_thread)
		e->to_thread = target_thread->pid;
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);
	spin_lock_init(&t->lock);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	if (!(t->flags & TF_ONE_WAY) &&
	    binder_supported_policy(current->policy)) {
		/* Inherit supported policies for synchronous transactions */
		t->priority.sched_policy = current->policy;
		t->priority.prio = current->normal_prio;
	} else {
		/* Otherwise, fall back to the default priority */
		t->priority = target_proc->default_priority;
	}

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
	if (IS_ERR(t->buffer)) {
		/*
		 * -ESRCH indicates VMA cleared. The target is dying.
		 */
		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	off_start = (binder_size_t *)(t->buffer->data +
				      ALIGN(tr->data_size, sizeof(void *)));
	offp = off_start;

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	off_end = (void *)off_start + tr->offsets_size;
	sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
	sg_buf_end = sg_bufp + extra_buffers_size;
	off_min = 0;
	for (; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(t->buffer, *offp);

		if (object_size == 0 || *offp < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid, (u64)*offp,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
		off_min = *offp + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			int target_fd = binder_translate_fd(fp->fd, t, thread,
							    in_reply_to);

			if (target_fd < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = target_fd;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			fp->pad_binder = 0;
			fp->fd = target_fd;
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			struct binder_buffer_object *parent =
				binder_validate_ptr(t->buffer, fda->parent,
						    off_start,
						    offp - off_start);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(t->buffer, off_start,
						   parent, fda->parent_offset,
						   last_fixup_obj,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(fda, parent, t, thread,
							in_reply_to);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = parent;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end - sg_bufp;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			if (copy_from_user(sg_bufp,
					   (const void __user *)(uintptr_t)
					   bp->buffer, bp->length)) {
				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
						  proc->pid, thread->pid);
				return_error_param = -EFAULT;
				return_error = BR_FAILED_REPLY;
				return_error_line = __LINE__;
				goto err_copy_data_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)sg_bufp +
				binder_alloc_get_user_buffer_offset(
						&target_proc->alloc);
			sg_bufp += ALIGN(bp->length, sizeof(u64));

			ret = binder_fixup_parent(t, thread, bp, off_start,
						  offp - off_start,
						  last_fixup_obj,
						  last_fixup_min_off);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = bp;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_object_type;
		}
	}
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	binder_enqueue_work(proc, tcomplete, &thread->todo);
	t->work.type = BINDER_WORK_TRANSACTION;

	if (reply) {
		binder_inner_proc_lock(target_proc);
		if (target_thread->is_dead) {
			binder_inner_proc_unlock(target_proc);
			goto err_dead_proc_or_thread;
		}
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction_ilocked(target_thread, in_reply_to);
		binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
		binder_inner_proc_unlock(target_proc);
		wake_up_interruptible_sync(&target_thread->wait);
		binder_restore_priority(current, in_reply_to->saved_priority);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_inner_proc_lock(proc);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
		binder_inner_proc_unlock(proc);
		if (!binder_proc_transaction(t, target_proc, target_thread)) {
			binder_inner_proc_lock(proc);
			binder_pop_transaction_ilocked(thread, t);
			binder_inner_proc_unlock(proc);
			goto err_dead_proc_or_thread;
		}
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		if (!binder_proc_transaction(t, target_proc, NULL))
			goto err_dead_proc_or_thread;
	}
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	binder_proc_dec_tmpref(target_proc);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
	return;

err_dead_proc_or_thread:
	return_error = BR_DEAD_REPLY;
	return_error_line = __LINE__;
	binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, t->buffer, offp);
	if (target_node)
		binder_dec_node_tmpref(target_node);
	target_node = NULL;
	t->buffer->transaction = NULL;
	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	if (target_proc)
		binder_proc_dec_tmpref(target_proc);
	if (target_node) {
		binder_dec_node(target_node, 1, 0);
		binder_dec_node_tmpref(target_node);
	}

	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
		     proc->pid, thread->pid, return_error, return_error_param,
		     (u64)tr->data_size, (u64)tr->offsets_size,
		     return_error_line);

	{
		struct binder_transaction_log_entry *fe;

		e->return_error = return_error;
		e->return_error_param = return_error_param;
		e->return_error_line = return_error_line;
		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
		smp_wmb();
		WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	}

	BUG_ON(thread->return_error.cmd != BR_OK);
	if (in_reply_to) {
		binder_restore_priority(current, in_reply_to->saved_priority);
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		binder_enqueue_work(thread->proc,
				    &thread->return_error.work,
				    &thread->todo);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
		thread->return_error.cmd = return_error;
		binder_enqueue_work(thread->proc,
				    &thread->return_error.work,
				    &thread->todo);
	}
}
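/*
 * Control-flow overview (illustrative): a synchronous call consists of
 * two binder_transaction() invocations, one per direction:
 *
 *	client writes BC_TRANSACTION --> binder_transaction(reply = 0)
 *	                                 server reads BR_TRANSACTION
 *	server writes BC_REPLY       --> binder_transaction(reply = 1)
 *	                                 client reads BR_REPLY
 */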
static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error.cmd == BR_OK) {
		int ret;

		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			const char *debug_string;
			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
			struct binder_ref_data rdata;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(uint32_t);
			ret = -1;
			if (increment && !target) {
				struct binder_node *ctx_mgr_node;
				mutex_lock(&context->context_mgr_node_lock);
				ctx_mgr_node = context->binder_context_mgr_node;
				if (ctx_mgr_node)
					ret = binder_inc_ref_for_node(
							proc, ctx_mgr_node,
							strong, NULL, &rdata);
				mutex_unlock(&context->context_mgr_node_lock);
			}
			if (ret)
				ret = binder_update_ref_for_handle(
						proc, target, increment, strong,
						&rdata);
			if (!ret && rdata.desc != target) {
				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
					proc->pid, thread->pid,
					target, rdata.desc);
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				break;
			case BC_RELEASE:
				debug_string = "Release";
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				break;
			}
			if (ret) {
				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
					proc->pid, thread->pid, debug_string,
					strong, target, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d\n",
				     proc->pid, thread->pid, debug_string,
				     rdata.debug_id, rdata.desc, rdata.strong,
				     rdata.weak);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;
			bool free_node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" :
					"BC_ACQUIRE_DONE",
					(u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					(u64)node_ptr, node->debug_id,
					(u64)cookie, (u64)node->cookie);
				binder_put_node(node);
				break;
			}
			binder_node_inner_lock(node);
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_weak_ref = 0;
			}
			free_node = binder_dec_node_nilocked(node,
					cmd == BC_ACQUIRE_DONE, 0);
			WARN_ON(free_node);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d tr %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs,
				     node->local_weak_refs, node->tmp_refs);
			binder_node_inner_unlock(node);
			binder_put_node(node);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_alloc_prepare_to_free(&proc->alloc,
							      data_ptr);
			if (buffer == NULL) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			if (!buffer->allow_user_free) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");

			if (buffer->transaction) {
				buffer->transaction->buffer = NULL;
				buffer->transaction = NULL;
			}
			if (buffer->async_transaction && buffer->target_node) {
				struct binder_node *buf_node;
				struct binder_work *w;

				buf_node = buffer->target_node;
				binder_node_inner_lock(buf_node);
				BUG_ON(!buf_node->has_async_transaction);
				BUG_ON(buf_node->proc != proc);
				w = binder_dequeue_work_head_ilocked(
						&buf_node->async_todo);
				if (!w) {
					buf_node->has_async_transaction = 0;
				} else {
					binder_enqueue_work_ilocked(
							w, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
				binder_node_inner_unlock(buf_node);
			}
			trace_binder_transaction_buffer_release(buffer);
			binder_transaction_buffer_release(proc, buffer, NULL);
			binder_alloc_free_buf(&proc->alloc, buffer);
			break;
		}

		case BC_TRANSACTION_SG:
		case BC_REPLY_SG: {
			struct binder_transaction_data_sg tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr.transaction_data,
					   cmd == BC_REPLY_SG, tr.buffers_size);
			break;
		}
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}

		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			binder_inner_proc_lock(proc);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			binder_inner_proc_unlock(proc);
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death = NULL;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/*
				 * Allocate memory for death notification
				 * before taking lock
				 */
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					WARN_ON(thread->return_error.cmd !=
						BR_OK);
					thread->return_error.cmd = BR_ERROR;
					binder_enqueue_work(
						thread->proc,
						&thread->return_error.work,
						&thread->todo);
					binder_debug(
						BINDER_DEBUG_FAILED_TRANSACTION,
						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						proc->pid, thread->pid);
					break;
				}
			}
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				binder_proc_unlock(proc);
				kfree(death);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->data.debug_id,
				     ref->data.desc, ref->data.strong,
				     ref->data.weak, ref->node->debug_id);

			binder_node_lock(ref->node);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					kfree(death);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;

					binder_inner_proc_lock(proc);
					binder_enqueue_work_ilocked(
						&ref->death->work, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
					binder_inner_proc_unlock(proc);
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
						proc->pid, thread->pid,
						(u64)death->cookie,
						(u64)cookie);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				ref->death = NULL;
				binder_inner_proc_lock(proc);
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_work_ilocked(
								&death->work,
								&thread->todo);
					else {
						binder_enqueue_work_ilocked(
								&death->work,
								&proc->todo);
						binder_wakeup_proc_ilocked(
								proc);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
				binder_inner_proc_unlock(proc);
			}
			binder_node_unlock(ref->node);
			binder_proc_unlock(proc);
		} break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			binder_inner_proc_lock(proc);
			list_for_each_entry(w, &proc->delivered_death,
					    entry) {
				struct binder_ref_death *tmp_death =
					container_of(w,
						     struct binder_ref_death,
						     work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
					proc->pid, thread->pid, (u64)cookie);
				binder_inner_proc_unlock(proc);
				break;
			}
			binder_dequeue_work_ilocked(&death->work);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper &
				    (BINDER_LOOPER_STATE_REGISTERED |
				     BINDER_LOOPER_STATE_ENTERED))
					binder_enqueue_work_ilocked(
						&death->work, &thread->todo);
				else {
					binder_enqueue_work_ilocked(
							&death->work,
							&proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
			}
			binder_inner_proc_unlock(proc);
		} break;

		default:
			pr_err("%d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}
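/*
 * User-space view (sketch): the buffer consumed above is a packed stream
 * of BC_* command words, each followed by its payload, handed to the
 * driver through the BINDER_WRITE_READ ioctl:
 *
 *	uint32_t cmds[] = { BC_ENTER_LOOPER };
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)cmds,
 *		.write_size = sizeof(cmds),
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */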
static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
	}
}
static int binder_put_node_cmd(struct binder_proc *proc,
			       struct binder_thread *thread,
			       void __user **ptrp,
			       binder_uintptr_t node_ptr,
			       binder_uintptr_t node_cookie,
			       int node_debug_id,
			       uint32_t cmd, const char *cmd_name)
{
	void __user *ptr = *ptrp;

	if (put_user(cmd, (uint32_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(uint32_t);

	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	binder_stat_br(proc, thread, cmd);
	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
		     proc->pid, thread->pid, cmd_name, node_debug_id,
		     (u64)node_ptr, (u64)node_cookie);

	*ptrp = ptr;
	return 0;
}
static int binder_wait_for_work(struct binder_thread *thread,
				bool do_proc_work)
{
	DEFINE_WAIT(wait);
	struct binder_proc *proc = thread->proc;
	int ret = 0;

	freezer_do_not_count();
	binder_inner_proc_lock(proc);
	for (;;) {
		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
		if (binder_has_work_ilocked(thread, do_proc_work))
			break;
		if (do_proc_work)
			list_add(&thread->waiting_thread_node,
				 &proc->waiting_threads);
		binder_inner_proc_unlock(proc);
		schedule();
		binder_inner_proc_lock(proc);
		list_del_init(&thread->waiting_thread_node);
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&thread->wait, &wait);
	binder_inner_proc_unlock(proc);
	freezer_count();

	return ret;
}
3901 static int binder_thread_read(struct binder_proc
*proc
,
3902 struct binder_thread
*thread
,
3903 binder_uintptr_t binder_buffer
, size_t size
,
3904 binder_size_t
*consumed
, int non_block
)
3906 void __user
*buffer
= (void __user
*)(uintptr_t)binder_buffer
;
3907 void __user
*ptr
= buffer
+ *consumed
;
3908 void __user
*end
= buffer
+ size
;
3911 int wait_for_proc_work
;
3913 if (*consumed
== 0) {
3914 if (put_user(BR_NOOP
, (uint32_t __user
*)ptr
))
3916 ptr
+= sizeof(uint32_t);
3920 binder_inner_proc_lock(proc
);
3921 wait_for_proc_work
= binder_available_for_proc_work_ilocked(thread
);
3922 binder_inner_proc_unlock(proc
);
3924 thread
->looper
|= BINDER_LOOPER_STATE_WAITING
;
3926 trace_binder_wait_for_work(wait_for_proc_work
,
3927 !!thread
->transaction_stack
,
3928 !binder_worklist_empty(proc
, &thread
->todo
));
3929 if (wait_for_proc_work
) {
3930 if (!(thread
->looper
& (BINDER_LOOPER_STATE_REGISTERED
|
3931 BINDER_LOOPER_STATE_ENTERED
))) {
3932 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
3933 proc
->pid
, thread
->pid
, thread
->looper
);
3934 wait_event_interruptible(binder_user_error_wait
,
3935 binder_stop_on_user_error
< 2);
3937 binder_restore_priority(current
, proc
->default_priority
);
3941 if (!binder_has_work(thread
, wait_for_proc_work
))
3944 ret
= binder_wait_for_work(thread
, wait_for_proc_work
);
3947 thread
->looper
&= ~BINDER_LOOPER_STATE_WAITING
;
3954 struct binder_transaction_data tr
;
3955 struct binder_work
*w
= NULL
;
3956 struct list_head
*list
= NULL
;
3957 struct binder_transaction
*t
= NULL
;
3958 struct binder_thread
*t_from
;
3960 binder_inner_proc_lock(proc
);
3961 if (!binder_worklist_empty_ilocked(&thread
->todo
))
3962 list
= &thread
->todo
;
3963 else if (!binder_worklist_empty_ilocked(&proc
->todo
) &&
3967 binder_inner_proc_unlock(proc
);
3970 if (ptr
- buffer
== 4 && !thread
->looper_need_return
)
3975 if (end
- ptr
< sizeof(tr
) + 4) {
3976 binder_inner_proc_unlock(proc
);
3979 w
= binder_dequeue_work_head_ilocked(list
);
3982 case BINDER_WORK_TRANSACTION
: {
3983 binder_inner_proc_unlock(proc
);
3984 t
= container_of(w
, struct binder_transaction
, work
);
3986 case BINDER_WORK_RETURN_ERROR
: {
3987 struct binder_error
*e
= container_of(
3988 w
, struct binder_error
, work
);
3990 WARN_ON(e
->cmd
== BR_OK
);
3991 binder_inner_proc_unlock(proc
);
3992 if (put_user(e
->cmd
, (uint32_t __user
*)ptr
))
3995 ptr
+= sizeof(uint32_t);
3997 binder_stat_br(proc
, thread
, e
->cmd
);
3999 case BINDER_WORK_TRANSACTION_COMPLETE
: {
4000 binder_inner_proc_unlock(proc
);
4001 cmd
= BR_TRANSACTION_COMPLETE
;
4002 if (put_user(cmd
, (uint32_t __user
*)ptr
))
4004 ptr
+= sizeof(uint32_t);
4006 binder_stat_br(proc
, thread
, cmd
);
4007 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE
,
4008 "%d:%d BR_TRANSACTION_COMPLETE\n",
4009 proc
->pid
, thread
->pid
);
4011 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE
);
4013 case BINDER_WORK_NODE
: {
4014 struct binder_node
*node
= container_of(w
, struct binder_node
, work
);
4016 binder_uintptr_t node_ptr
= node
->ptr
;
4017 binder_uintptr_t node_cookie
= node
->cookie
;
4018 int node_debug_id
= node
->debug_id
;
4021 void __user
*orig_ptr
= ptr
;
4023 BUG_ON(proc
!= node
->proc
);
4024 strong
= node
->internal_strong_refs
||
4025 node
->local_strong_refs
;
4026 weak
= !hlist_empty(&node
->refs
) ||
4027 node
->local_weak_refs
||
4028 node
->tmp_refs
|| strong
;
4029 has_strong_ref
= node
->has_strong_ref
;
4030 has_weak_ref
= node
->has_weak_ref
;
4032 if (weak
&& !has_weak_ref
) {
4033 node
->has_weak_ref
= 1;
4034 node
->pending_weak_ref
= 1;
4035 node
->local_weak_refs
++;
4037 if (strong
&& !has_strong_ref
) {
4038 node
->has_strong_ref
= 1;
4039 node
->pending_strong_ref
= 1;
4040 node
->local_strong_refs
++;
4042 if (!strong
&& has_strong_ref
)
4043 node
->has_strong_ref
= 0;
4044 if (!weak
&& has_weak_ref
)
4045 node
->has_weak_ref
= 0;
4046 if (!weak
&& !strong
) {
4047 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
4048 "%d:%d node %d u%016llx c%016llx deleted\n",
4049 proc
->pid
, thread
->pid
,
4053 rb_erase(&node
->rb_node
, &proc
->nodes
);
4054 binder_inner_proc_unlock(proc
);
4055 binder_node_lock(node
);
4057 * Acquire the node lock before freeing the
4058 * node to serialize with other threads that
4059 * may have been holding the node lock while
4060 * decrementing this node (avoids race where
4061 * this thread frees while the other thread
4062 * is unlocking the node after the final
4065 binder_node_unlock(node
);
4066 binder_free_node(node
);
4068 binder_inner_proc_unlock(proc
);

			if (weak && !has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_INCREFS, "BR_INCREFS");
			if (!ret && strong && !has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_ACQUIRE, "BR_ACQUIRE");
			if (!ret && !strong && has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_RELEASE, "BR_RELEASE");
			if (!ret && !weak && has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_DECREFS, "BR_DECREFS");
			if (orig_ptr == ptr)
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
			if (ret)
				return ret;
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			cookie = death->cookie;

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)cookie);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				binder_inner_proc_unlock(proc);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;
			struct binder_priority node_prio;

			tr.target.ptr = target_node->ptr;
			tr.cookie = target_node->cookie;
			node_prio.sched_policy = target_node->sched_policy;
			node_prio.prio = target_node->min_priority;
			binder_transaction_priority(current, t, node_prio,
						    target_node->inherit_rt);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = 0;
			tr.cookie = 0;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			tr.sender_pid = task_tgid_nr_ns(sender,
							task_active_pid_ns(current));
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		tr.data.ptr.buffer = (binder_uintptr_t)
			((uintptr_t)t->buffer->data +
			binder_alloc_get_user_buffer_offset(&proc->alloc));
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);
			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr))) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);
			return -EFAULT;
		}
		ptr += sizeof(tr);

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	binder_inner_proc_lock(proc);
	if (proc->requested_threads == 0 &&
	    list_empty(&thread->proc->waiting_threads) &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED))
	    /* the user-space code fails to spawn a new thread
	     * if we leave this out
	     */) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}
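
/*
 * binder_release_work() below is the cleanup path for work that was
 * queued but never delivered to a reader: synchronous transactions
 * get a BR_DEAD_REPLY back to their sender, while one-way
 * transactions, completions and death notifications are freed and
 * their stats updated.
 */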

static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;

	while (1) {
		w = binder_dequeue_work_head(proc, list);
		if (!w)
			return;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);
			if (t->buffer->target_node &&
			    !(t->flags & TF_ONE_WAY)) {
				binder_send_failed_reply(t, BR_DEAD_REPLY);
			} else {
				binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
					"undelivered transaction %d\n",
					t->debug_id);
				binder_free_transaction(t);
			}
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_ERROR: %u\n",
				e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}
}

static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	get_task_struct(current);
	thread->task = current;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}

static void binder_free_proc(struct binder_proc *proc)
{
	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}

static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	put_task_struct(thread->task);
	kfree(thread);
}
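
/*
 * binder_thread_release() detaches a dying thread from every
 * transaction on its stack; a reply owed to a remote peer is turned
 * into BR_DEAD_REPLY so the sender is not left blocked on a thread
 * that will never answer.
 */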

static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
	}
	binder_inner_proc_unlock(thread->proc);

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}
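
/*
 * poll() support: the calling thread marks itself with
 * BINDER_LOOPER_STATE_POLL and reports POLLIN once thread-local or,
 * when it is available for process work, process-wide work is queued.
 */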

static unsigned int binder_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	bool wait_for_proc_work;

	thread = binder_get_thread(proc);

	binder_inner_proc_lock(thread->proc);
	thread->looper |= BINDER_LOOPER_STATE_POLL;
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);

	binder_inner_proc_unlock(thread->proc);

	poll_wait(filp, &thread->wait, wait);

	if (binder_has_work(thread, wait_for_proc_work))
		return POLLIN;

	return 0;
}
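
/*
 * BINDER_WRITE_READ is the main data path. A minimal userspace sketch
 * (illustrative only; buffer names and error handling are made up):
 *
 *	struct binder_write_read bwr = {0};
 *
 *	bwr.write_buffer = (binder_uintptr_t)out_cmds;
 *	bwr.write_size = out_len;
 *	bwr.read_buffer = (binder_uintptr_t)in_buf;
 *	bwr.read_size = sizeof(in_buf);
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *
 * On return, write_consumed/read_consumed report how much of each
 * buffer was processed. Writes are handled before reads, matching the
 * order in the function below.
 */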

static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}

static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, NULL);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}
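
/*
 * Resumable node iterator backing BINDER_GET_NODE_DEBUG_INFO:
 * userspace passes the ptr of the last node it saw (0 to start) and
 * receives the next node with a strictly greater ptr; an all-zero
 * result marks the end of iteration.
 */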

static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
				struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;

	memset(info, 0, sizeof(*info));

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
			info->ptr = node->ptr;
			info->cookie = node->cookie;
			info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}

static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		int max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_DEBUG_INFO: {
		struct binder_node_debug_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}

static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}

static int binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};
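
/*
 * The mmap contract: a single read-only mapping of at most 4MB
 * (clamped below). The fault handler above returns SIGBUS because
 * pages are only ever inserted by the driver's buffer allocator,
 * never faulted in on demand.
 */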

static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
	if (ret)
		return ret;
	proc->files = get_files_struct(current);
	return 0;

err_bad_arg:
	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}

static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;
	struct binder_device *binder_dev;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	INIT_LIST_HEAD(&proc->todo);
	if (binder_supported_policy(current->policy)) {
		proc->default_priority.sched_policy = current->policy;
		proc->default_priority.prio = current->normal_prio;
	} else {
		proc->default_priority.sched_policy = SCHED_NORMAL;
		proc->default_priority.prio = NICE_TO_PRIO(0);
	}

	binder_dev = container_of(filp->private_data, struct binder_device,
				  miscdev);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	INIT_LIST_HEAD(&proc->waiting_threads);
	filp->private_data = proc;

	mutex_lock(&binder_procs_lock);
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * anyway print all contexts that a given PID has, so this
		 * is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&binder_proc_fops);
	}

	return 0;
}

static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}
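
/*
 * A flush tears nothing down by itself: it only sets
 * looper_need_return on each thread and wakes blocked readers so they
 * fall out of binder_thread_read() and return to userspace.
 */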

static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}

static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}

static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node,
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
		 * with new notification requests and the
		 * inner lock to synchronize with queued
		 * death notifications.
		 */
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;

		BUG_ON(!list_empty(&ref->death->work.entry));
		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);
	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}

static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	BUG_ON(proc->files);

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}

static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			files = proc->files;
			if (files)
				proc->files = NULL;
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		if (files)
			put_files_struct(files);
	} while (proc);
}

static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
				&binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}
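
/*
 * Everything below is debugfs dump code: seq_file printers for
 * transactions, work items, threads, nodes, refs and per-process
 * stats, plus the transaction log ring buffer. These printers take
 * the same locks as the transaction paths, so dumping state may
 * briefly contend with live binder traffic.
 */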

static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;

	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority.sched_policy,
		   t->priority.prio, t->need_reply);
	spin_unlock(&t->lock);

	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %p\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->data);
}

static void print_binder_work_ilocked(struct seq_file *m,
				     struct binder_proc *proc,
				     const char *prefix,
				     const char *transaction_prefix,
				     struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}

static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
			thread->pid, thread->looper,
			thread->looper_need_return,
			atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}

static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->sched_policy, node->min_priority,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					  "    pending async transaction", w);
	}
}

static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}

static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							     struct binder_ref,
							     rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}

static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};

static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}

static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}

static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}

static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}

static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}

static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);
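
/*
 * One misc device is registered per name in binder_devices_param, a
 * comma-separated list taken from the build configuration (on Android
 * this is typically something like "binder,hwbinder,vndbinder"). Each
 * device gets its own binder_context and thus its own context manager.
 */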

static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}

static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_names, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;

	binder_alloc_shrinker_init();

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	/*
	 * Copy the module_parameter string, because we don't want to
	 * tokenize it in-place.
	 */
	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}
	strcpy(device_names, binder_devices_param);

	device_tmp = device_names;
	while ((device_name = strsep(&device_tmp, ","))) {
		ret = init_binder_device(device_name);
		if (ret)
			goto err_init_binder_device_failed;
	}

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");