/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel.
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel.
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 */
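/*
 * Illustrative sketch of the convention above; this is an example, not
 * driver code, and both function names are hypothetical. A function whose
 * name ends in _ilocked must be entered with proc->inner_lock held, so
 * the lock/unlock pair lives in the caller:
 *
 *	static bool example_todo_empty_ilocked(struct binder_proc *proc)
 *	{
 *		return list_empty(&proc->todo);	// inner_lock held by caller
 *	}
 *
 *	static bool example_todo_empty(struct binder_proc *proc)
 *	{
 *		bool empty;
 *
 *		binder_inner_proc_lock(proc);	// lock 3) in the order above
 *		empty = example_todo_empty_ilocked(proc);
 *		binder_inner_proc_unlock(proc);
 *		return empty;
 *	}
 */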
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#include <linux/sched/rt.h>

#define NICE_WIDTH (MAX_NICE - MIN_NICE + 1)
#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
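/*
 * Worked example (illustrative): with MAX_RT_PRIO == 100,
 * NICE_TO_PRIO(-20) == 100, NICE_TO_PRIO(0) == 120 and
 * NICE_TO_PRIO(19) == 139, which is the [100..139] kernel priority
 * range documented for SCHED_NORMAL at struct binder_priority below.
 * PRIO_TO_NICE() is the inverse mapping.
 */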
#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"
static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;
static struct workqueue_struct *binder_deferred_workqueue;
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};

static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);

	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
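/*
 * Illustrative usage (example only, mirroring calls made later in this
 * file): the message is emitted only when the corresponding bit is set
 * in binder_debug_mask (module parameter "debug_mask"):
 *
 *	binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
 *		     proc->pid, thread->pid);
 */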
#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)
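/*
 * Illustrative sketch (example only): the to_*_object() helpers recover
 * the enclosing object from an embedded header, typically after
 * binder_validate_object() has checked the header type and size:
 *
 *	struct binder_object_header *hdr = ...;	// points into a buffer
 *
 *	if (hdr->type == BINDER_TYPE_FD) {
 *		struct binder_fd_object *fp = to_binder_fd_object(hdr);
 *		// fp->fd is the sender's file descriptor
 *	}
 */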
enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}
struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;
static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
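/*
 * Indexing example for the ring buffer above (illustrative numbers):
 * atomic_inc_return() makes the first call use cur == 1; with
 * ARRAY_SIZE(log->entry) == 32 the 32nd call sets log->full, and the
 * 33rd call (cur == 33) recycles slot 33 % 32 == 1.
 */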
struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;

	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};
/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};
/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @sched_policy:         minimum scheduling policy for node
 *                        (invariant after initialized)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @inherit_rt:           inherit RT scheduling policy from caller
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 sched_policy:2;
		u8 inherit_rt:1;
		u8 accept_fds:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};
struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};
/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};
/**
 * struct binder_ref - struct to track references on nodes
 * @data:        binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:  list entry for node->refs list in target node
 *               (protected by @node->lock)
 * @proc:        binder_proc containing ref
 * @node:        binder_node of target node. When cleaning up a
 *               ref for deletion in binder_cleanup_ref, a non-NULL
 *               @node indicates the node must be freed
 * @death:       pointer to death notification (ref_death) if requested
 *               (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};
enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};
/**
 * struct binder_priority - scheduler policy and priority
 * @sched_policy:  scheduler policy
 * @prio:          [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
 *
 * The binder driver supports inheriting the following scheduler policies:
 * SCHED_NORMAL
 * SCHED_BATCH
 * SCHED_FIFO
 * SCHED_RR
 */
struct binder_priority {
	unsigned int sched_policy;
	int prio;
};
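/*
 * Example values (illustrative only): a SCHED_NORMAL task at nice -10
 * is described by { .sched_policy = SCHED_NORMAL, .prio = 110 }, and a
 * SCHED_FIFO task at rtprio 50 by { .sched_policy = SCHED_FIFO,
 * .prio = 49 }, since the kernel value for RT tasks is
 * MAX_USER_RT_PRIO - 1 - rtprio (see to_kernel_prio() below).
 */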
/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @wait:                 wait queue head to wait for proc work
 *                        (invariant after initialized)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	struct binder_priority default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};
enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};
/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 * @task:                 struct task_struct for this thread
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
	struct task_struct *task;
};
struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	struct binder_priority priority;
	struct binder_priority saved_priority;
	bool set_priority_called;
	kuid_t sender_euid;
	/**
	 * @lock:  protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}
/**
 * binder_proc_unlock() - Release spinlock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}
/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}
/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}
/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}
/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}
/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
}
/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	spin_unlock(&node->lock);
}
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}
/**
 * binder_enqueue_work() - Add an item to the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 */
static void
binder_enqueue_work(struct binder_proc *proc,
		    struct binder_work *work,
		    struct list_head *target_list)
{
	binder_inner_proc_lock(proc);
	binder_enqueue_work_ilocked(work, target_list);
	binder_inner_proc_unlock(proc);
}
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}
static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}
/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}
/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}
static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return !binder_worklist_empty_ilocked(&thread->todo) ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}
static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}
static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}
/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return: If there's a thread currently waiting for process work,
 *         returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}
/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}
static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
static bool is_rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static bool is_fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static bool binder_supported_policy(int policy)
{
	return is_fair_policy(policy) || is_rt_policy(policy);
}

static int to_userspace_prio(int policy, int kernel_priority)
{
	if (is_fair_policy(policy))
		return PRIO_TO_NICE(kernel_priority);
	else
		return MAX_USER_RT_PRIO - 1 - kernel_priority;
}

static int to_kernel_prio(int policy, int user_priority)
{
	if (is_fair_policy(policy))
		return NICE_TO_PRIO(user_priority);
	else
		return MAX_USER_RT_PRIO - 1 - user_priority;
}
static void binder_do_set_priority(struct task_struct *task,
				   struct binder_priority desired,
				   bool verify)
{
	int priority; /* user-space prio value */
	bool has_cap_nice;
	unsigned int policy = desired.sched_policy;

	if (task->policy == policy && task->normal_prio == desired.prio)
		return;

	has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);

	priority = to_userspace_prio(policy, desired.prio);

	if (verify && is_rt_policy(policy) && !has_cap_nice) {
		long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);

		if (max_rtprio == 0) {
			policy = SCHED_NORMAL;
			priority = MIN_NICE;
		} else if (priority > max_rtprio) {
			priority = max_rtprio;
		}
	}

	if (verify && is_fair_policy(policy) && !has_cap_nice) {
		long min_nice = (MAX_NICE - task_rlimit(task, RLIMIT_NICE) + 1);

		if (min_nice > MAX_NICE) {
			binder_user_error("%d RLIMIT_NICE not set\n",
					  task->pid);
			return;
		} else if (priority < min_nice) {
			priority = min_nice;
		}
	}

	if (policy != desired.sched_policy ||
	    to_kernel_prio(policy, priority) != desired.prio)
		binder_debug(BINDER_DEBUG_PRIORITY_CAP,
			     "%d: priority %d not allowed, using %d instead\n",
			      task->pid, desired.prio,
			      to_kernel_prio(policy, priority));

	/* Set the actual priority */
	if (task->policy != policy || is_rt_policy(policy)) {
		struct sched_param params;

		params.sched_priority = is_rt_policy(policy) ? priority : 0;

		sched_setscheduler_nocheck(task,
					   policy | SCHED_RESET_ON_FORK,
					   &params);
	}
	if (is_fair_policy(policy))
		set_user_nice(task, priority);
}
static void binder_set_priority(struct task_struct *task,
				struct binder_priority desired)
{
	binder_do_set_priority(task, desired, /* verify = */ true);
}

static void binder_restore_priority(struct task_struct *task,
				    struct binder_priority desired)
{
	binder_do_set_priority(task, desired, /* verify = */ false);
}
static void binder_transaction_priority(struct task_struct *task,
					struct binder_transaction *t,
					struct binder_priority node_prio,
					bool inherit_rt)
{
	struct binder_priority desired_prio;

	if (t->set_priority_called)
		return;

	t->set_priority_called = true;
	t->saved_priority.sched_policy = task->policy;
	t->saved_priority.prio = task->normal_prio;

	desired_prio.prio = t->priority.prio;
	desired_prio.sched_policy = t->priority.sched_policy;

	if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
		desired_prio.prio = NICE_TO_PRIO(0);
		desired_prio.sched_policy = SCHED_NORMAL;
	}

	if (node_prio.prio < t->priority.prio ||
	    (node_prio.prio == t->priority.prio &&
	     node_prio.sched_policy == SCHED_FIFO)) {
		/*
		 * In case the minimum priority on the node is
		 * higher (lower value), use that priority. If
		 * the priority is the same, but the node uses
		 * SCHED_FIFO, prefer SCHED_FIFO, since it can
		 * run unbounded, unlike SCHED_RR.
		 */
		desired_prio = node_prio;
	}

	binder_set_priority(task, desired_prio);
}
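/*
 * Example (illustrative): if the sender runs at SCHED_NORMAL prio 120
 * and the node advertises a minimum of SCHED_NORMAL prio 110
 * (node_prio.prio == 110 < 120), the target task is boosted to prio 110
 * for the duration of the transaction; saved_priority is used to
 * restore the original priority afterwards.
 */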
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}
static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;
	s8 priority;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
		FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
	node->min_priority = to_kernel_prio(node->sched_policy, priority);
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->
				      binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			binder_dequeue_work_ilocked(&node->work);
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}
static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}
static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}
static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}
/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}
/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
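/*
 * Typical pairing (illustrative sketch): lookups that return a node
 * take a temporary reference which the caller must drop:
 *
 *	node = binder_get_node(proc, ptr);	// takes a tmp ref
 *	if (node) {
 *		// ... use node ...
 *		binder_put_node(node);		// drops the tmp ref
 *	}
 */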
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}
/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		      node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->data.debug_id,
			      ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}
/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}
/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}
/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node, or NULL if not found or if the ref is not
 * strong when a strong ref is required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}
/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}
/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}
/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}
/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}
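/*
 * Typical pairing (illustrative sketch, mirroring
 * binder_send_failed_reply() below): a thread obtained via
 * binder_get_txn_from() or binder_get_txn_from_and_acq_inner() must be
 * released with binder_thread_dec_tmpref() once the caller is done:
 *
 *	target_thread = binder_get_txn_from_and_acq_inner(t);
 *	if (target_thread) {
 *		// ... operate on target_thread under its inner lock ...
 *		binder_inner_proc_unlock(target_thread->proc);
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 */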
/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}
/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from)
		return NULL;
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
	return NULL;
}
static void binder_free_transaction(struct binder_transaction *t)
{
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				      t->debug_id,
				      target_thread->proc->pid,
				      target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_work_ilocked(
					&target_thread->reply_error.work,
					&target_thread->todo);
				wake_up_interruptible(&target_thread->wait);
			} else {
				WARN(1, "Unexpected reply error: %u\n",
				     target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			      t->debug_id);
	}
}
/**
 * binder_validate_object() - checks for a valid metadata object in a buffer.
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the buffer at which to validate an object.
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero.
 */
static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
{
	/* Check if we can read a header first */
	struct binder_object_header *hdr;
	size_t object_size = 0;

	if (offset > buffer->data_size - sizeof(*hdr) ||
	    buffer->data_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	/* Ok, now see if we can read a complete object. */
	hdr = (struct binder_object_header *)(buffer->data + offset);
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
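
/*
 * Illustrative sketch (not used by the driver): walking a buffer's
 * offset array with binder_validate_object(), as the release and
 * translate paths below do. The helper name is hypothetical.
 */
static size_t __maybe_unused binder_example_count_objects(
		struct binder_buffer *buffer,
		binder_size_t *off_start, binder_size_t *off_end)
{
	binder_size_t *offp;
	size_t count = 0;

	for (offp = off_start; offp < off_end; offp++) {
		/* a zero size means no valid object header at *offp */
		if (binder_validate_object(buffer, *offp) == 0)
			break;
		count++;
	}
	return count;
}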
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b:		binder_buffer containing the object
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start:	points to the start of the offset array
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 */
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
							binder_size_t index,
							binder_size_t *start,
							binder_size_t num_valid)
{
	struct binder_buffer_object *buffer_obj;
	binder_size_t *offp;

	if (index >= num_valid)
		return NULL;

	offp = start + index;
	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
		return NULL;

	return buffer_obj;
}
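
/*
 * Illustrative sketch (not used by the driver): both call sites below
 * pass "offp - off_start" as @num_valid, so a parent lookup can only
 * name an object that appears earlier in the offset array than the
 * object currently being processed. The helper name is hypothetical.
 */
static struct binder_buffer_object *__maybe_unused
binder_example_lookup_parent(struct binder_buffer *b,
			     binder_size_t parent_index,
			     binder_size_t *off_start, binder_size_t *offp)
{
	return binder_validate_ptr(b, parent_index, off_start,
				   offp - off_start);
}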
/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b:			transaction buffer
 * @objects_start:	start of objects buffer
 * @buffer:		binder_buffer_object in which to fix up
 * @offset:		start offset in @buffer to fix up
 * @last_obj:		last binder_buffer_object that we fixed up in
 * @last_min_offset:	minimum fixup offset in @last_obj
 *
 * Return:		%true if a fixup in buffer @buffer at offset @offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_buffer *b,
				  binder_size_t *objects_start,
				  struct binder_buffer_object *buffer,
				  binder_size_t fixup_offset,
				  struct binder_buffer_object *last_obj,
				  binder_size_t last_min_offset)
{
	if (!last_obj) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj != buffer) {
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
		last_obj = (struct binder_buffer_object *)
			(b->data + *(objects_start + last_obj->parent));
	}
	return (fixup_offset >= last_min_offset);
}
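
/*
 * Illustrative sketch (not used by the driver): encoding of the
 * "E (parent = A, offset = 32)" case from the comment above. With
 * @last_obj = C, the minimum offset is recomputed from C's
 * parent_offset while walking up to A, so a fixup in A at offset 32
 * validates while one at offset 8 does not. All names are hypothetical.
 */
static bool __maybe_unused binder_example_check_fixup(
		struct binder_buffer *b, binder_size_t *objects_start,
		struct binder_buffer_object *a, struct binder_buffer_object *c)
{
	return binder_validate_fixup(b, objects_start, a, 32, c, 0);
}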
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_start, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start = (binder_size_t *)(buffer->data +
				      ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)off_start + buffer->offsets_size;
	for (offp = off_start; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(buffer, *offp);

		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		hdr = (struct binder_object_header *)(buffer->data + *offp);
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);

			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->fd);
			if (failed_at)
				task_close_fd(proc, fp->fd);
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			uintptr_t parent_buffer;
			u32 *fd_array;
			size_t fd_index;
			binder_size_t fd_buf_size;

			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(buffer, fda->parent,
						     off_start,
						     offp - off_start);
			if (!parent) {
				pr_err("transaction release %d bad parent offset",
				       debug_id);
				continue;
			}
			/*
			 * Since the parent was already fixed up, convert it
			 * back to kernel address space to access it
			 */
			parent_buffer = parent->buffer -
				binder_alloc_get_user_buffer_offset(
						&proc->alloc);
			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			fd_array = (u32 *)(parent_buffer + fda->parent_offset);
			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
				task_close_fd(proc, fd_array[fd_index]);
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}
static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}
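
/*
 * Summary of the type rewrite performed above (sender view -> receiver
 * view), added here for orientation:
 *
 *	BINDER_TYPE_BINDER      -> BINDER_TYPE_HANDLE
 *	BINDER_TYPE_WEAK_BINDER -> BINDER_TYPE_WEAK_HANDLE
 *
 * fp->binder and fp->cookie are cleared and replaced by fp->handle
 * (rdata.desc), a descriptor in the receiving process's reference table.
 */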
static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}
static int binder_translate_fd(int fd,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	int target_fd;
	struct file *file;
	int ret;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
	if (target_fd < 0) {
		ret = -ENOMEM;
		goto err_get_unused_fd;
	}
	task_fd_install(target_proc, target_fd, file);
	trace_binder_transaction_fd(t, fd, target_fd);
	binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
		     fd, target_fd);

	return target_fd;

err_get_unused_fd:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}
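
/*
 * Note on the above: this is effectively a cross-process dup(). The
 * struct file reference obtained with fget() in the sender is installed
 * into an unused descriptor slot of the target (with O_CLOEXEC), so both
 * processes end up referring to the same open file description.
 */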
static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size, num_installed_fds;
	int target_fd;
	uintptr_t parent_buffer;
	u32 *fd_array;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * Since the parent was already fixed up, convert it
	 * back to the kernel address space to access it
	 */
	parent_buffer = parent->buffer -
		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
	fd_array = (u32 *)(parent_buffer + fda->parent_offset);
	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
						in_reply_to);
		if (target_fd < 0)
			goto err_translate_fd_failed;
		fd_array[fdi] = target_fd;
	}
	return 0;

err_translate_fd_failed:
	/*
	 * Failed to allocate fd or security error, free fds
	 * installed so far.
	 */
	num_installed_fds = fdi;
	for (fdi = 0; fdi < num_installed_fds; fdi++)
		task_close_fd(target_proc, fd_array[fdi]);
	return target_fd;
}
static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t *off_start,
			       binder_size_t num_valid,
			       struct binder_buffer_object *last_fixup_obj,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	u8 *parent_buffer;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(b, off_start,
				   parent, bp->parent_offset,
				   last_fixup_obj,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	parent_buffer = (u8 *)(parent->buffer -
			binder_alloc_get_user_buffer_offset(
				&target_proc->alloc));
	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;

	return 0;
}
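
/*
 * Illustrative sketch (not used by the driver): the userspace layout
 * that binder_fixup_parent() repairs. A child buffer object names its
 * parent by index in the offset array and gives the offset within the
 * parent where the pointer to the child lives; once the parent's bytes
 * have been copied into the target, that embedded pointer must be
 * rewritten for the target's address space. Field names follow the uapi
 * struct binder_buffer_object; the values and helper name are
 * hypothetical.
 */
static void __maybe_unused binder_example_describe_child(
		struct binder_buffer_object *child)
{
	child->hdr.type = BINDER_TYPE_PTR;
	child->flags = BINDER_BUFFER_FLAG_HAS_PARENT;
	child->parent = 0;		/* index of parent in offset array */
	child->parent_offset = 16;	/* where parent stores child ptr */
}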
/**
 * binder_proc_transaction() - sends a transaction to a process and wakes it up
 * @t:		transaction to send
 * @proc:	process to send the transaction to
 * @thread:	thread in @proc to send the transaction to (may be NULL)
 *
 * This function queues a transaction to the specified process. It will try
 * to find a thread in the target process to handle the transaction and
 * wake it up. If no thread is found, the work is queued to the proc
 * waitqueue.
 *
 * If the @thread parameter is not NULL, the transaction is always queued
 * to the waitlist of that specific thread.
 *
 * Return:	true if the transaction was successfully queued
 *		false if the target process or thread is dead
 */
static bool binder_proc_transaction(struct binder_transaction *t,
				    struct binder_proc *proc,
				    struct binder_thread *thread)
{
	struct list_head *target_list = NULL;
	struct binder_node *node = t->buffer->target_node;
	struct binder_priority node_prio;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool wakeup = true;

	BUG_ON(!node);
	binder_node_lock(node);
	node_prio.prio = node->min_priority;
	node_prio.sched_policy = node->sched_policy;

	if (oneway) {
		BUG_ON(thread);
		if (node->has_async_transaction) {
			target_list = &node->async_todo;
			wakeup = false;
		} else {
			node->has_async_transaction = 1;
		}
	}

	binder_inner_proc_lock(proc);

	if (proc->is_dead || (thread && thread->is_dead)) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		return false;
	}

	if (!thread && !target_list)
		thread = binder_select_thread_ilocked(proc);

	if (thread) {
		target_list = &thread->todo;
		binder_transaction_priority(thread->task, t, node_prio,
					    node->inherit_rt);
	} else if (!target_list) {
		target_list = &proc->todo;
	} else {
		BUG_ON(target_list != &node->async_todo);
	}

	binder_enqueue_work_ilocked(&t->work, target_list);

	if (wakeup)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);

	return true;
}
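
/*
 * Orientation note: binder_transaction() below uses this helper in two
 * ways. Synchronous work is queued with an explicit @thread (or one
 * picked via binder_select_thread_ilocked()); one-way work is queued
 * with @thread == NULL so that it can land on node->async_todo, with the
 * wakeup suppressed, when an async transaction is already in flight.
 */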
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end, *off_start;
	binder_size_t off_min;
	u8 *sg_bufp, *sg_buf_end;
	struct binder_proc *target_proc = NULL;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error = 0;
	uint32_t return_error_param = 0;
	uint32_t return_error_line = 0;
	struct binder_buffer_object *last_fixup_obj = NULL;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	int t_debug_id = atomic_inc_return(&binder_last_id);

	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	e->context_name = proc->context->name;

	if (reply) {
		binder_inner_proc_lock(proc);
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_inner_proc_unlock(proc);
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_empty_call_stack;
		}
		if (in_reply_to->to_thread != thread) {
			spin_lock(&in_reply_to->lock);
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			spin_unlock(&in_reply_to->lock);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		binder_inner_proc_unlock(proc);
		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			binder_inner_proc_unlock(target_thread->proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
		target_proc->tmp_ref++;
		binder_inner_proc_unlock(target_thread->proc);
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, tr->target.handle,
						     true);
			if (ref) {
				binder_inc_node(ref->node, 1, 0, NULL);
				target_node = ref->node;
			}
			binder_proc_unlock(proc);
			if (target_node == NULL) {
				binder_user_error("%d:%d got transaction to invalid handle\n",
					proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_invalid_target_handle;
			}
		} else {
			mutex_lock(&context->context_mgr_node_lock);
			target_node = context->binder_context_mgr_node;
			if (target_node == NULL) {
				return_error = BR_DEAD_REPLY;
				mutex_unlock(&context->context_mgr_node_lock);
				return_error_line = __LINE__;
				goto err_no_context_mgr_node;
			}
			binder_inc_node(target_node, 1, 0, NULL);
			mutex_unlock(&context->context_mgr_node_lock);
		}
		e->to_node = target_node->debug_id;
		binder_node_lock(target_node);
		target_proc = target_node->proc;
		if (target_proc == NULL) {
			binder_node_unlock(target_node);
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		binder_inner_proc_lock(target_proc);
		target_proc->tmp_ref++;
		binder_inner_proc_unlock(target_proc);
		binder_node_unlock(target_node);
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPERM;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		binder_inner_proc_lock(proc);
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				spin_lock(&tmp->lock);
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				spin_unlock(&tmp->lock);
				binder_inner_proc_unlock(proc);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EPROTO;
				return_error_line = __LINE__;
				goto err_bad_call_stack;
			}
			while (tmp) {
				struct binder_thread *from;

				spin_lock(&tmp->lock);
				from = tmp->from;
				if (from && from->proc == target_proc) {
					atomic_inc(&from->tmp_ref);
					target_thread = from;
					spin_unlock(&tmp->lock);
					break;
				}
				spin_unlock(&tmp->lock);
				tmp = tmp->from_parent;
			}
		}
		binder_inner_proc_unlock(proc);
	}
	if (target_thread)
		e->to_thread = target_thread->pid;
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);
	spin_lock_init(&t->lock);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	if (!(t->flags & TF_ONE_WAY) &&
	    binder_supported_policy(current->policy)) {
		/* Inherit supported policies for synchronous transactions */
		t->priority.sched_policy = current->policy;
		t->priority.prio = current->normal_prio;
	} else {
		/* Otherwise, fall back to the default priority */
		t->priority = target_proc->default_priority;
	}

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
	if (IS_ERR(t->buffer)) {
		/*
		 * -ESRCH indicates VMA cleared. The target is dying.
		 */
		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	off_start = (binder_size_t *)(t->buffer->data +
				      ALIGN(tr->data_size, sizeof(void *)));
	offp = off_start;

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	off_end = (void *)off_start + tr->offsets_size;
	sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
	sg_buf_end = sg_bufp + extra_buffers_size;
	off_min = 0;
	for (; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(t->buffer, *offp);

		if (object_size == 0 || *offp < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid, (u64)*offp,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
		off_min = *offp + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			int target_fd = binder_translate_fd(fp->fd, t, thread,
							    in_reply_to);

			if (target_fd < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = target_fd;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			fp->pad_binder = 0;
			fp->fd = target_fd;
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			struct binder_buffer_object *parent =
				binder_validate_ptr(t->buffer, fda->parent,
						    off_start,
						    offp - off_start);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(t->buffer, off_start,
						   parent, fda->parent_offset,
						   last_fixup_obj,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(fda, parent, t, thread,
							in_reply_to);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = parent;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end - sg_bufp;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			if (copy_from_user(sg_bufp,
					   (const void __user *)(uintptr_t)
					   bp->buffer, bp->length)) {
				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
						  proc->pid, thread->pid);
				return_error_param = -EFAULT;
				return_error = BR_FAILED_REPLY;
				return_error_line = __LINE__;
				goto err_copy_data_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)sg_bufp +
				binder_alloc_get_user_buffer_offset(
						&target_proc->alloc);
			sg_bufp += ALIGN(bp->length, sizeof(u64));

			ret = binder_fixup_parent(t, thread, bp, off_start,
						  offp - off_start,
						  last_fixup_obj,
						  last_fixup_min_off);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = bp;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_object_type;
		}
	}
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	binder_enqueue_work(proc, tcomplete, &thread->todo);
	t->work.type = BINDER_WORK_TRANSACTION;

	if (reply) {
		binder_inner_proc_lock(target_proc);
		if (target_thread->is_dead) {
			binder_inner_proc_unlock(target_proc);
			goto err_dead_proc_or_thread;
		}
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction_ilocked(target_thread, in_reply_to);
		binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
		binder_inner_proc_unlock(target_proc);
		wake_up_interruptible_sync(&target_thread->wait);
		binder_restore_priority(current, in_reply_to->saved_priority);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_inner_proc_lock(proc);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
		binder_inner_proc_unlock(proc);
		if (!binder_proc_transaction(t, target_proc, target_thread)) {
			binder_inner_proc_lock(proc);
			binder_pop_transaction_ilocked(thread, t);
			binder_inner_proc_unlock(proc);
			goto err_dead_proc_or_thread;
		}
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		if (!binder_proc_transaction(t, target_proc, NULL))
			goto err_dead_proc_or_thread;
	}
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	binder_proc_dec_tmpref(target_proc);
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
	return;

err_dead_proc_or_thread:
	return_error = BR_DEAD_REPLY;
	return_error_line = __LINE__;
	binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, t->buffer, offp);
	target_node = NULL;
	t->buffer->transaction = NULL;
	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	if (target_proc)
		binder_proc_dec_tmpref(target_proc);
	if (target_node)
		binder_dec_node(target_node, 1, 0);

	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
		     proc->pid, thread->pid, return_error, return_error_param,
		     (u64)tr->data_size, (u64)tr->offsets_size,
		     return_error_line);

	{
		struct binder_transaction_log_entry *fe;

		e->return_error = return_error;
		e->return_error_param = return_error_param;
		e->return_error_line = return_error_line;
		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
		smp_wmb();
		WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	}

	BUG_ON(thread->return_error.cmd != BR_OK);
	if (in_reply_to) {
		binder_restore_priority(current, in_reply_to->saved_priority);
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		binder_enqueue_work(thread->proc,
				    &thread->return_error.work,
				    &thread->todo);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
		thread->return_error.cmd = return_error;
		binder_enqueue_work(thread->proc,
				    &thread->return_error.work,
				    &thread->todo);
	}
}
static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error.cmd == BR_OK) {
		int ret;

		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			const char *debug_string;
			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
			struct binder_ref_data rdata;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(uint32_t);
			ret = -1;
			if (increment && !target) {
				struct binder_node *ctx_mgr_node;
				mutex_lock(&context->context_mgr_node_lock);
				ctx_mgr_node = context->binder_context_mgr_node;
				if (ctx_mgr_node)
					ret = binder_inc_ref_for_node(
							proc, ctx_mgr_node,
							strong, NULL, &rdata);
				mutex_unlock(&context->context_mgr_node_lock);
			}
			if (ret)
				ret = binder_update_ref_for_handle(
						proc, target, increment, strong,
						&rdata);
			if (!ret && rdata.desc != target) {
				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
					proc->pid, thread->pid,
					target, rdata.desc);
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				break;
			case BC_RELEASE:
				debug_string = "Release";
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				break;
			}
			if (ret) {
				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
					proc->pid, thread->pid, debug_string,
					strong, target, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d\n",
				     proc->pid, thread->pid, debug_string,
				     rdata.debug_id, rdata.desc, rdata.strong,
				     rdata.weak);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;
			bool free_node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" :
					"BC_ACQUIRE_DONE",
					(u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					(u64)node_ptr, node->debug_id,
					(u64)cookie, (u64)node->cookie);
				binder_put_node(node);
				break;
			}
			binder_node_inner_lock(node);
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_weak_ref = 0;
			}
			free_node = binder_dec_node_nilocked(node,
					cmd == BC_ACQUIRE_DONE, 0);
			WARN_ON(free_node);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d tr %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs,
				     node->local_weak_refs, node->tmp_refs);
			binder_node_inner_unlock(node);
			binder_put_node(node);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_alloc_prepare_to_free(&proc->alloc,
							      data_ptr);
			if (buffer == NULL) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			if (!buffer->allow_user_free) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");

			if (buffer->transaction) {
				buffer->transaction->buffer = NULL;
				buffer->transaction = NULL;
			}
			if (buffer->async_transaction && buffer->target_node) {
				struct binder_node *buf_node;
				struct binder_work *w;

				buf_node = buffer->target_node;
				binder_node_inner_lock(buf_node);
				BUG_ON(!buf_node->has_async_transaction);
				BUG_ON(buf_node->proc != proc);
				w = binder_dequeue_work_head_ilocked(
						&buf_node->async_todo);
				if (!w) {
					buf_node->has_async_transaction = 0;
				} else {
					binder_enqueue_work_ilocked(
							w, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
				binder_node_inner_unlock(buf_node);
			}
			trace_binder_transaction_buffer_release(buffer);
			binder_transaction_buffer_release(proc, buffer, NULL);
			binder_alloc_free_buf(&proc->alloc, buffer);
			break;
		}

		case BC_TRANSACTION_SG:
		case BC_REPLY_SG: {
			struct binder_transaction_data_sg tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr.transaction_data,
					   cmd == BC_REPLY_SG, tr.buffers_size);
			break;
		}
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}

		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			binder_inner_proc_lock(proc);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			binder_inner_proc_unlock(proc);
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death = NULL;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/*
				 * Allocate memory for death notification
				 * before taking lock
				 */
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					WARN_ON(thread->return_error.cmd !=
						BR_OK);
					thread->return_error.cmd = BR_ERROR;
					binder_enqueue_work(
						thread->proc,
						&thread->return_error.work,
						&thread->todo);
					binder_debug(
						BINDER_DEBUG_FAILED_TRANSACTION,
						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						proc->pid, thread->pid);
					break;
				}
			}
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				binder_proc_unlock(proc);
				kfree(death);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->data.debug_id,
				     ref->data.desc, ref->data.strong,
				     ref->data.weak, ref->node->debug_id);

			binder_node_lock(ref->node);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					kfree(death);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;

					binder_inner_proc_lock(proc);
					binder_enqueue_work_ilocked(
						&ref->death->work, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
					binder_inner_proc_unlock(proc);
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
						proc->pid, thread->pid,
						(u64)death->cookie,
						(u64)cookie);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				ref->death = NULL;
				binder_inner_proc_lock(proc);
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_work_ilocked(
								&death->work,
								&thread->todo);
					else {
						binder_enqueue_work_ilocked(
								&death->work,
								&proc->todo);
						binder_wakeup_proc_ilocked(
								proc);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
				binder_inner_proc_unlock(proc);
			}
			binder_node_unlock(ref->node);
			binder_proc_unlock(proc);
		} break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			binder_inner_proc_lock(proc);
			list_for_each_entry(w, &proc->delivered_death,
					    entry) {
				struct binder_ref_death *tmp_death =
					container_of(w,
						     struct binder_ref_death,
						     work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
					proc->pid, thread->pid, (u64)cookie);
				binder_inner_proc_unlock(proc);
				break;
			}
			binder_dequeue_work_ilocked(&death->work);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper &
				    (BINDER_LOOPER_STATE_REGISTERED |
				     BINDER_LOOPER_STATE_ENTERED))
					binder_enqueue_work_ilocked(
						&death->work, &thread->todo);
				else {
					binder_enqueue_work_ilocked(
							&death->work,
							&proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
			}
			binder_inner_proc_unlock(proc);
		} break;

		default:
			pr_err("%d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}
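
/*
 * Illustrative sketch (userspace perspective, not kernel code): the
 * write side parsed above is a packed stream of u32 command words, each
 * followed by its fixed-size payload. A minimal stream that enters the
 * looper and frees one buffer could be built like this; the names ubuf
 * and data_ptr are hypothetical:
 *
 *	uint32_t *p = ubuf;
 *	*p++ = BC_ENTER_LOOPER;		// no payload
 *	*p++ = BC_FREE_BUFFER;		// payload: one binder_uintptr_t
 *	memcpy(p, &data_ptr, sizeof(data_ptr));
 *
 * The stream is handed to the driver through the write buffer that
 * binder_thread_write() consumes.
 */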
static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
	}
}
static int binder_put_node_cmd(struct binder_proc *proc,
			       struct binder_thread *thread,
			       void __user **ptrp,
			       binder_uintptr_t node_ptr,
			       binder_uintptr_t node_cookie,
			       int node_debug_id,
			       uint32_t cmd, const char *cmd_name)
{
	void __user *ptr = *ptrp;

	if (put_user(cmd, (uint32_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(uint32_t);

	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	binder_stat_br(proc, thread, cmd);
	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
		     proc->pid, thread->pid, cmd_name, node_debug_id,
		     (u64)node_ptr, (u64)node_cookie);

	*ptrp = ptr;
	return 0;
}
static int binder_wait_for_work(struct binder_thread *thread,
				bool do_proc_work)
{
	DEFINE_WAIT(wait);
	struct binder_proc *proc = thread->proc;
	int ret = 0;

	freezer_do_not_count();
	binder_inner_proc_lock(proc);
	for (;;) {
		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
		if (binder_has_work_ilocked(thread, do_proc_work))
			break;
		if (do_proc_work)
			list_add(&thread->waiting_thread_node,
				 &proc->waiting_threads);
		binder_inner_proc_unlock(proc);
		schedule();
		binder_inner_proc_lock(proc);
		list_del_init(&thread->waiting_thread_node);
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&thread->wait, &wait);
	binder_inner_proc_unlock(proc);
	freezer_count();

	return ret;
}
3867 static int binder_thread_read(struct binder_proc
*proc
,
3868 struct binder_thread
*thread
,
3869 binder_uintptr_t binder_buffer
, size_t size
,
3870 binder_size_t
*consumed
, int non_block
)
3872 void __user
*buffer
= (void __user
*)(uintptr_t)binder_buffer
;
3873 void __user
*ptr
= buffer
+ *consumed
;
3874 void __user
*end
= buffer
+ size
;
3877 int wait_for_proc_work
;
3879 if (*consumed
== 0) {
3880 if (put_user(BR_NOOP
, (uint32_t __user
*)ptr
))
3882 ptr
+= sizeof(uint32_t);
3886 binder_inner_proc_lock(proc
);
3887 wait_for_proc_work
= binder_available_for_proc_work_ilocked(thread
);
3888 binder_inner_proc_unlock(proc
);
3890 thread
->looper
|= BINDER_LOOPER_STATE_WAITING
;
3892 trace_binder_wait_for_work(wait_for_proc_work
,
3893 !!thread
->transaction_stack
,
3894 !binder_worklist_empty(proc
, &thread
->todo
));
3895 if (wait_for_proc_work
) {
3896 if (!(thread
->looper
& (BINDER_LOOPER_STATE_REGISTERED
|
3897 BINDER_LOOPER_STATE_ENTERED
))) {
3898 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
3899 proc
->pid
, thread
->pid
, thread
->looper
);
3900 wait_event_interruptible(binder_user_error_wait
,
3901 binder_stop_on_user_error
< 2);
3903 binder_restore_priority(current
, proc
->default_priority
);
3907 if (!binder_has_work(thread
, wait_for_proc_work
))
3910 ret
= binder_wait_for_work(thread
, wait_for_proc_work
);
3913 thread
->looper
&= ~BINDER_LOOPER_STATE_WAITING
;
3920 struct binder_transaction_data tr
;
3921 struct binder_work
*w
= NULL
;
3922 struct list_head
*list
= NULL
;
3923 struct binder_transaction
*t
= NULL
;
3924 struct binder_thread
*t_from
;
3926 binder_inner_proc_lock(proc
);
3927 if (!binder_worklist_empty_ilocked(&thread
->todo
))
3928 list
= &thread
->todo
;
3929 else if (!binder_worklist_empty_ilocked(&proc
->todo
) &&
3933 binder_inner_proc_unlock(proc
);
3936 if (ptr
- buffer
== 4 && !thread
->looper_need_return
)
3941 if (end
- ptr
< sizeof(tr
) + 4) {
3942 binder_inner_proc_unlock(proc
);
3945 w
= binder_dequeue_work_head_ilocked(list
);
3948 case BINDER_WORK_TRANSACTION
: {
3949 binder_inner_proc_unlock(proc
);
3950 t
= container_of(w
, struct binder_transaction
, work
);
3952 case BINDER_WORK_RETURN_ERROR
: {
3953 struct binder_error
*e
= container_of(
3954 w
, struct binder_error
, work
);
3956 WARN_ON(e
->cmd
== BR_OK
);
3957 binder_inner_proc_unlock(proc
);
3958 if (put_user(e
->cmd
, (uint32_t __user
*)ptr
))
3961 ptr
+= sizeof(uint32_t);
3963 binder_stat_br(proc
, thread
, cmd
);
3965 case BINDER_WORK_TRANSACTION_COMPLETE
: {
3966 binder_inner_proc_unlock(proc
);
3967 cmd
= BR_TRANSACTION_COMPLETE
;
3968 if (put_user(cmd
, (uint32_t __user
*)ptr
))
3970 ptr
+= sizeof(uint32_t);
3972 binder_stat_br(proc
, thread
, cmd
);
3973 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE
,
3974 "%d:%d BR_TRANSACTION_COMPLETE\n",
3975 proc
->pid
, thread
->pid
);
3977 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE
);
3979 case BINDER_WORK_NODE
: {
3980 struct binder_node
*node
= container_of(w
, struct binder_node
, work
);
3982 binder_uintptr_t node_ptr
= node
->ptr
;
3983 binder_uintptr_t node_cookie
= node
->cookie
;
3984 int node_debug_id
= node
->debug_id
;
3987 void __user
*orig_ptr
= ptr
;
3989 BUG_ON(proc
!= node
->proc
);
3990 strong
= node
->internal_strong_refs
||
3991 node
->local_strong_refs
;
3992 weak
= !hlist_empty(&node
->refs
) ||
3993 node
->local_weak_refs
||
3994 node
->tmp_refs
|| strong
;
3995 has_strong_ref
= node
->has_strong_ref
;
3996 has_weak_ref
= node
->has_weak_ref
;
3998 if (weak
&& !has_weak_ref
) {
3999 node
->has_weak_ref
= 1;
4000 node
->pending_weak_ref
= 1;
4001 node
->local_weak_refs
++;
4003 if (strong
&& !has_strong_ref
) {
4004 node
->has_strong_ref
= 1;
4005 node
->pending_strong_ref
= 1;
4006 node
->local_strong_refs
++;
4008 if (!strong
&& has_strong_ref
)
4009 node
->has_strong_ref
= 0;
4010 if (!weak
&& has_weak_ref
)
4011 node
->has_weak_ref
= 0;
4012 if (!weak
&& !strong
) {
4013 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
4014 "%d:%d node %d u%016llx c%016llx deleted\n",
4015 proc
->pid
, thread
->pid
,
4019 rb_erase(&node
->rb_node
, &proc
->nodes
);
4020 binder_inner_proc_unlock(proc
);
4021 binder_node_lock(node
);
4023 * Acquire the node lock before freeing the
4024 * node to serialize with other threads that
4025 * may have been holding the node lock while
4026 * decrementing this node (avoids race where
4027 * this thread frees while the other thread
4028 * is unlocking the node after the final
4031 binder_node_unlock(node
);
4032 binder_free_node(node
);
4034 binder_inner_proc_unlock(proc
);
4036 if (weak
&& !has_weak_ref
)
4037 ret
= binder_put_node_cmd(
4038 proc
, thread
, &ptr
, node_ptr
,
4039 node_cookie
, node_debug_id
,
4040 BR_INCREFS
, "BR_INCREFS");
4041 if (!ret
&& strong
&& !has_strong_ref
)
4042 ret
= binder_put_node_cmd(
4043 proc
, thread
, &ptr
, node_ptr
,
4044 node_cookie
, node_debug_id
,
4045 BR_ACQUIRE
, "BR_ACQUIRE");
4046 if (!ret
&& !strong
&& has_strong_ref
)
4047 ret
= binder_put_node_cmd(
4048 proc
, thread
, &ptr
, node_ptr
,
4049 node_cookie
, node_debug_id
,
4050 BR_RELEASE
, "BR_RELEASE");
4051 if (!ret
&& !weak
&& has_weak_ref
)
4052 ret
= binder_put_node_cmd(
4053 proc
, thread
, &ptr
, node_ptr
,
4054 node_cookie
, node_debug_id
,
4055 BR_DECREFS
, "BR_DECREFS");
4056 if (orig_ptr
== ptr
)
4057 binder_debug(BINDER_DEBUG_INTERNAL_REFS
,
4058 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4059 proc
->pid
, thread
->pid
,
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			cookie = death->cookie;

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)cookie);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				binder_inner_proc_unlock(proc);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;
		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;
			struct binder_priority node_prio;

			tr.target.ptr = target_node->ptr;
			tr.cookie = target_node->cookie;
			node_prio.sched_policy = target_node->sched_policy;
			node_prio.prio = target_node->min_priority;
			binder_transaction_priority(current, t, node_prio,
						    target_node->inherit_rt);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = 0;
			tr.cookie = 0;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			tr.sender_pid = task_tgid_nr_ns(sender,
							task_active_pid_ns(current));
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		tr.data.ptr.buffer = (binder_uintptr_t)
			((uintptr_t)t->buffer->data +
			binder_alloc_get_user_buffer_offset(&proc->alloc));
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);
			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr))) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);
			return -EFAULT;
		}
		ptr += sizeof(tr);

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	binder_inner_proc_lock(proc);
	if (proc->requested_threads == 0 &&
	    list_empty(&thread->proc->waiting_threads) &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED))
	    /* the user-space code fails to spawn a new thread
	     * if we leave this out */) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}
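
/*
 * Sketch (user space, not compiled into the driver): how a looper thread
 * typically walks the buffer that binder_thread_read() filled in above --
 * a stream of 32-bit BR_* codes, each optionally followed by a fixed-size
 * payload. This mirrors what libbinder's IPCThreadState does, but it is
 * only a minimal sketch; handle_transaction() is a hypothetical callback.
 */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <linux/android/binder.h>

static void drain_read_buffer(const char *buf, size_t consumed)
{
	const char *ptr = buf;
	const char *end = buf + consumed;

	while (ptr + sizeof(uint32_t) <= end) {
		uint32_t cmd;

		memcpy(&cmd, ptr, sizeof(cmd));
		ptr += sizeof(cmd);
		switch (cmd) {
		case BR_NOOP:
		case BR_TRANSACTION_COMPLETE:
			break;		/* no payload */
		case BR_TRANSACTION:
		case BR_REPLY: {
			struct binder_transaction_data tr;

			memcpy(&tr, ptr, sizeof(tr));
			ptr += sizeof(tr);
			/* handle_transaction(&tr); -- hypothetical */
			break;
		}
		default:
			return;		/* command this sketch doesn't parse */
		}
	}
}
#endif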
static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;

	while (1) {
		w = binder_dequeue_work_head(proc, list);
		if (!w)
			return;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);
			if (t->buffer->target_node &&
			    !(t->flags & TF_ONE_WAY)) {
				binder_send_failed_reply(t, BR_DEAD_REPLY);
			} else {
				binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
					"undelivered transaction %d\n",
					t->debug_id);
				binder_free_transaction(t);
			}
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_ERROR: %u\n",
				e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}
}
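
/*
 * Sketch (user space): what produces the death-notification work items
 * released above and delivered by binder_thread_read(). A client
 * registers a (handle, cookie) pair with BC_REQUEST_DEATH_NOTIFICATION
 * and later acknowledges a BR_DEAD_BINDER with BC_DEAD_BINDER_DONE. The
 * packed wrapper matches the byte-packed command stream the driver
 * expects (u32 command followed immediately by its payload); this is a
 * minimal sketch, not libbinder's implementation.
 */
#if 0
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int request_death_notification(int fd, uint32_t handle,
				      binder_uintptr_t cookie)
{
	struct {
		uint32_t cmd;
		struct binder_handle_cookie payload;
	} __attribute__((packed)) req = {
		.cmd = BC_REQUEST_DEATH_NOTIFICATION,
		.payload = { .handle = handle, .cookie = cookie },
	};
	struct binder_write_read bwr = {
		.write_buffer = (binder_uintptr_t)(uintptr_t)&req,
		.write_size = sizeof(req),
	};

	return ioctl(fd, BINDER_WRITE_READ, &bwr);
}
#endif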
static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	get_task_struct(current);
	thread->task = current;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		/*
		 * Allocate outside the spinlock (kzalloc may sleep), then
		 * retry the lookup/insert; free the allocation if another
		 * thread raced us and inserted first.
		 */
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}
static void binder_free_proc(struct binder_proc *proc)
{
	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}

static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	put_task_struct(thread->task);
	kfree(thread);
}
static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
	}
	binder_inner_proc_unlock(thread->proc);

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}
static unsigned int binder_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	bool wait_for_proc_work;

	thread = binder_get_thread(proc);
	if (!thread)
		return POLLERR;

	binder_inner_proc_lock(thread->proc);
	thread->looper |= BINDER_LOOPER_STATE_POLL;
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);

	binder_inner_proc_unlock(thread->proc);

	poll_wait(filp, &thread->wait, wait);

	if (binder_has_work(thread, wait_for_proc_work))
		return POLLIN;

	return 0;
}
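
/*
 * Sketch (user space): binder_poll() reports work for the calling thread
 * via thread->wait, so a process typically has a looper thread poll the
 * fd and then pull work with BINDER_WRITE_READ. Minimal sketch, assuming
 * the fd was opened and mmap'ed as in binder_open()/binder_mmap() below.
 */
#if 0
#include <poll.h>

static int wait_for_binder_work(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	/* returns > 0 once binder_has_work() is true for this thread */
	return poll(&pfd, 1, -1);
}
#endif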
static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
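
/*
 * Sketch (user space): the other side of binder_ioctl_write_read(). One
 * struct binder_write_read carries an optional BC_* command stream
 * (consumed by binder_thread_write()) and an optional return buffer
 * (filled by binder_thread_read()). A minimal looper registration plus
 * one read cycle, assuming the uapi header is available:
 */
#if 0
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int binder_enter_looper_and_read(int fd)
{
	uint32_t cmd = BC_ENTER_LOOPER;	/* command with no payload */
	char readbuf[256];
	struct binder_write_read bwr = {
		.write_buffer = (binder_uintptr_t)(uintptr_t)&cmd,
		.write_size = sizeof(cmd),
		.read_buffer = (binder_uintptr_t)(uintptr_t)readbuf,
		.read_size = sizeof(readbuf),
	};

	/* blocks in binder_thread_read() unless the fd is O_NONBLOCK */
	if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
		return -1;
	/* bwr.read_consumed bytes of BR_* commands are now in readbuf */
	return 0;
}
#endif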
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, NULL);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}
static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
				struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;

	memset(info, 0, sizeof(*info));

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
			info->ptr = node->ptr;
			info->cookie = node->cookie;
			info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		int max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_DEBUG_INFO: {
		struct binder_node_debug_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}
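
/*
 * Sketch (user space): the protocol-version handshake and thread-pool
 * sizing that correspond to the BINDER_VERSION and BINDER_SET_MAX_THREADS
 * cases above. libbinder performs equivalent ioctls when it opens the
 * driver; this is only a minimal sketch.
 */
#if 0
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int binder_handshake(int fd, uint32_t max_threads)
{
	struct binder_version vers;

	if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
		return -1;
	/* size the pool of threads the driver may ask us to spawn */
	return ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads);
}
#endif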
static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}

static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
	if (ret)
		return ret;
	proc->files = get_files_struct(current);
	return 0;

err_bad_arg:
	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
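
/*
 * Sketch (user space): how a client typically reaches binder_open() and
 * binder_mmap(). The mapping must be read-only (FORBIDDEN_MMAP_FLAGS
 * rejects writable mappings and the driver clears VM_MAYWRITE above);
 * the kernel copies transaction data into it and user space only reads.
 * The 128 KiB size is an assumption borrowed from servicemanager, not a
 * requirement; anything up to SZ_4M is accepted.
 */
#if 0
#include <fcntl.h>
#include <stddef.h>
#include <sys/mman.h>

static void *binder_open_and_map(int *out_fd)
{
	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);

	if (fd < 0)
		return NULL;
	*out_fd = fd;
	/* caller must compare the result against MAP_FAILED */
	return mmap(NULL, 128 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
}
#endif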
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;
	struct binder_device *binder_dev;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	INIT_LIST_HEAD(&proc->todo);
	if (binder_supported_policy(current->policy)) {
		proc->default_priority.sched_policy = current->policy;
		proc->default_priority.prio = current->normal_prio;
	} else {
		proc->default_priority.sched_policy = SCHED_NORMAL;
		proc->default_priority.prio = NICE_TO_PRIO(0);
	}

	binder_dev = container_of(filp->private_data, struct binder_device,
				  miscdev);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	INIT_LIST_HEAD(&proc->waiting_threads);
	filp->private_data = proc;

	mutex_lock(&binder_procs_lock);
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * anyway print all contexts that a given PID has, so this
		 * is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&binder_proc_fops);
	}

	return 0;
}
static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}

static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}
static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node.
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
		 * with new notification requests and the
		 * inner lock to synchronize with queued
		 * death notifications.
		 */
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;

		BUG_ON(!list_empty(&ref->death->work.entry));
		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);
	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	BUG_ON(proc->files);

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			files = proc->files;
			if (files)
				proc->files = NULL;
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		if (files)
			put_files_struct(files);
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
				&binder_deferred_list);
		queue_work(binder_deferred_workqueue, &binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}
static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;

	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority.sched_policy,
		   t->priority.prio, t->need_reply);
	spin_unlock(&t->lock);

	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %p\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->data);
}
static void print_binder_work_ilocked(struct seq_file *m,
				      struct binder_proc *proc,
				      const char *prefix,
				      const char *transaction_prefix,
				      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}
static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}
static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->sched_policy, node->min_priority,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					"    pending async transaction", w);
	}
}
static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							struct binder_ref,
							rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}
static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}
static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read-barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}
static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);
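
/*
 * Sketch (user space): the debugfs files created in binder_init() below
 * can be read like any seq_file; e.g. dumping global state, assuming
 * debugfs is mounted at /sys/kernel/debug:
 */
#if 0
#include <stdio.h>

static void dump_binder_state(void)
{
	char line[512];
	FILE *f = fopen("/sys/kernel/debug/binder/state", "r");

	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}
#endif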
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_names;
	struct binder_device *device;
	struct hlist_node *tmp;

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);
	binder_deferred_workqueue = create_singlethread_workqueue("binder");
	if (!binder_deferred_workqueue)
		return -ENOMEM;

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	/*
	 * Copy the module_parameter string, because we don't want to
	 * tokenize it in-place.
	 */
	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}
	strcpy(device_names, binder_devices_param);

	while ((device_name = strsep(&device_names, ","))) {
		ret = init_binder_device(device_name);
		if (ret)
			goto err_init_binder_device_failed;
	}

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	destroy_workqueue(binder_deferred_workqueue);

	return ret;
}

device_initcall(binder_init);

#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");