UPSTREAM: android: binder: fix type mismatch warning
[GitHub/LineageOS/android_kernel_samsung_universal7580.git] / drivers / android / binder.c
1 /* binder.c
2 *
3 * Android IPC Subsystem
4 *
5 * Copyright (C) 2007-2008 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18 /*
19 * Locking overview
20 *
21 * There are 3 main spinlocks which must be acquired in the
22 * order shown:
23 *
24 * 1) proc->outer_lock : protects binder_ref
25 * binder_proc_lock() and binder_proc_unlock() are
26 * used to acq/rel.
27 * 2) node->lock : protects most fields of binder_node.
28 * binder_node_lock() and binder_node_unlock() are
29 * used to acq/rel
30 * 3) proc->inner_lock : protects the thread and node lists
31 * (proc->threads, proc->waiting_threads, proc->nodes)
32 * and all todo lists associated with the binder_proc
33 * (proc->todo, thread->todo, proc->delivered_death and
34 * node->async_todo), as well as thread->transaction_stack
35 * binder_inner_proc_lock() and binder_inner_proc_unlock()
36 * are used to acq/rel
37 *
38 * Any lock under procA must never be nested under any lock at the same
39 * level or below on procB.
40 *
41 * Functions that require a lock to be held on entry indicate the
42 * required lock in the suffix of the function name:
43 *
44 * foo_olocked() : requires proc->outer_lock
45 * foo_nlocked() : requires node->lock
46 * foo_ilocked() : requires proc->inner_lock
47 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
48 * foo_nilocked(): requires node->lock and proc->inner_lock
49 * ...
50 */
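/*
 * Illustrative sketch of the convention above: updating state on a node
 * that is owned by a live proc takes the node lock and then the proc
 * inner lock, i.e. lock 3) nested inside lock 2):
 *
 *	binder_node_lock(node);
 *	if (node->proc)
 *		binder_inner_proc_lock(node->proc);
 *	... update node state and the todo lists ...
 *	if (node->proc)
 *		binder_inner_proc_unlock(node->proc);
 *	binder_node_unlock(node);
 *
 * binder_node_inner_lock()/binder_node_inner_unlock() below wrap exactly
 * this pattern.
 */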
51
52 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
53
54 #include <asm/cacheflush.h>
55 #include <linux/fdtable.h>
56 #include <linux/file.h>
57 #include <linux/freezer.h>
58 #include <linux/fs.h>
59 #include <linux/list.h>
60 #include <linux/miscdevice.h>
61 #include <linux/module.h>
62 #include <linux/mutex.h>
63 #include <linux/nsproxy.h>
64 #include <linux/poll.h>
65 #include <linux/debugfs.h>
66 #include <linux/rbtree.h>
67 #include <linux/sched.h>
68 #include <linux/seq_file.h>
69 #include <linux/uaccess.h>
70 #include <linux/pid_namespace.h>
71 #include <linux/security.h>
72 #include <linux/spinlock.h>
73
74 #include <linux/sched/rt.h>
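/*
 * Fallback nice<->priority helpers mirroring the scheduler's own mapping
 * (include/linux/sched/prio.h on newer kernels). Assuming MAX_RT_PRIO == 100
 * as in mainline, NICE_TO_PRIO(0) == 120, NICE_TO_PRIO(MIN_NICE) == 100 and
 * NICE_TO_PRIO(MAX_NICE) == 139.
 */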
75 #define MAX_NICE 19
76 #define MIN_NICE -20
77 #define NICE_WIDTH (MAX_NICE - MIN_NICE + 1)
78 #define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
79 #define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
80
81 #ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
82 #define BINDER_IPC_32BIT 1
83 #endif
84
85 #include <uapi/linux/android/binder.h>
86 #include "binder_alloc.h"
87 #include "binder_trace.h"
88
89 static HLIST_HEAD(binder_deferred_list);
90 static DEFINE_MUTEX(binder_deferred_lock);
91
92 static HLIST_HEAD(binder_devices);
93 static HLIST_HEAD(binder_procs);
94 static DEFINE_MUTEX(binder_procs_lock);
95
96 static HLIST_HEAD(binder_dead_nodes);
97 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
98
99 static struct dentry *binder_debugfs_dir_entry_root;
100 static struct dentry *binder_debugfs_dir_entry_proc;
101 static atomic_t binder_last_id;
102 static struct workqueue_struct *binder_deferred_workqueue;
103
104 #define BINDER_DEBUG_ENTRY(name) \
105 static int binder_##name##_open(struct inode *inode, struct file *file) \
106 { \
107 return single_open(file, binder_##name##_show, inode->i_private); \
108 } \
109 \
110 static const struct file_operations binder_##name##_fops = { \
111 .owner = THIS_MODULE, \
112 .open = binder_##name##_open, \
113 .read = seq_read, \
114 .llseek = seq_lseek, \
115 .release = single_release, \
116 }
117
118 static int binder_proc_show(struct seq_file *m, void *unused);
119 BINDER_DEBUG_ENTRY(proc);
120
121 /* This is only defined in include/asm-arm/sizes.h */
122 #ifndef SZ_1K
123 #define SZ_1K 0x400
124 #endif
125
126 #ifndef SZ_4M
127 #define SZ_4M 0x400000
128 #endif
129
130 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
131
132 #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
133
134 enum {
135 BINDER_DEBUG_USER_ERROR = 1U << 0,
136 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
137 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
138 BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
139 BINDER_DEBUG_DEAD_BINDER = 1U << 4,
140 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
141 BINDER_DEBUG_READ_WRITE = 1U << 6,
142 BINDER_DEBUG_USER_REFS = 1U << 7,
143 BINDER_DEBUG_THREADS = 1U << 8,
144 BINDER_DEBUG_TRANSACTION = 1U << 9,
145 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
146 BINDER_DEBUG_FREE_BUFFER = 1U << 11,
147 BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
148 BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
149 BINDER_DEBUG_SPINLOCKS = 1U << 14,
150 };
151 static uint32_t binder_debug_mask;
152
153 module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
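/*
 * Illustrative usage, assuming a typical build where the driver's module
 * name is "binder": the mask can be changed at runtime through
 *
 *	echo 0x3 > /sys/module/binder/parameters/debug_mask
 *
 * which enables BINDER_DEBUG_USER_ERROR | BINDER_DEBUG_FAILED_TRANSACTION.
 */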
154
155 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
156 module_param_named(devices, binder_devices_param, charp, S_IRUGO);
157
158 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
159 static int binder_stop_on_user_error;
160
161 static int binder_set_stop_on_user_error(const char *val,
162 struct kernel_param *kp)
163 {
164 int ret;
165
166 ret = param_set_int(val, kp);
167 if (binder_stop_on_user_error < 2)
168 wake_up(&binder_user_error_wait);
169 return ret;
170 }
171 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
172 param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
173
174 #define binder_debug(mask, x...) \
175 do { \
176 if (binder_debug_mask & mask) \
177 pr_info(x); \
178 } while (0)
179
180 #define binder_user_error(x...) \
181 do { \
182 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
183 pr_info(x); \
184 if (binder_stop_on_user_error) \
185 binder_stop_on_user_error = 2; \
186 } while (0)
187
188 #define to_flat_binder_object(hdr) \
189 container_of(hdr, struct flat_binder_object, hdr)
190
191 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
192
193 #define to_binder_buffer_object(hdr) \
194 container_of(hdr, struct binder_buffer_object, hdr)
195
196 #define to_binder_fd_array_object(hdr) \
197 container_of(hdr, struct binder_fd_array_object, hdr)
198
199 enum binder_stat_types {
200 BINDER_STAT_PROC,
201 BINDER_STAT_THREAD,
202 BINDER_STAT_NODE,
203 BINDER_STAT_REF,
204 BINDER_STAT_DEATH,
205 BINDER_STAT_TRANSACTION,
206 BINDER_STAT_TRANSACTION_COMPLETE,
207 BINDER_STAT_COUNT
208 };
209
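/*
 * br[] and bc[] are indexed by _IOC_NR() of the BR_* return codes and
 * BC_* commands from uapi binder.h, so each array is sized by the
 * highest code (BR_FAILED_REPLY and BC_REPLY_SG respectively) plus one.
 */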
210 struct binder_stats {
211 atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
212 atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
213 atomic_t obj_created[BINDER_STAT_COUNT];
214 atomic_t obj_deleted[BINDER_STAT_COUNT];
215 };
216
217 static struct binder_stats binder_stats;
218
219 static inline void binder_stats_deleted(enum binder_stat_types type)
220 {
221 atomic_inc(&binder_stats.obj_deleted[type]);
222 }
223
224 static inline void binder_stats_created(enum binder_stat_types type)
225 {
226 atomic_inc(&binder_stats.obj_created[type]);
227 }
228
229 struct binder_transaction_log_entry {
230 int debug_id;
231 int debug_id_done;
232 int call_type;
233 int from_proc;
234 int from_thread;
235 int target_handle;
236 int to_proc;
237 int to_thread;
238 int to_node;
239 int data_size;
240 int offsets_size;
241 int return_error_line;
242 uint32_t return_error;
243 uint32_t return_error_param;
244 const char *context_name;
245 };
246 struct binder_transaction_log {
247 atomic_t cur;
248 bool full;
249 struct binder_transaction_log_entry entry[32];
250 };
251 static struct binder_transaction_log binder_transaction_log;
252 static struct binder_transaction_log binder_transaction_log_failed;
253
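/*
 * The transaction logs are small ring buffers: @cur is incremented
 * atomically for each new entry and indexes the 32-entry array modulo
 * its size, @full is set once the counter reaches the array size (the
 * buffer has wrapped), and @debug_id_done together with the barriers in
 * binder_transaction_log_add() lets readers detect an entry that is
 * still being filled in.
 */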
254 static struct binder_transaction_log_entry *binder_transaction_log_add(
255 struct binder_transaction_log *log)
256 {
257 struct binder_transaction_log_entry *e;
258 unsigned int cur = atomic_inc_return(&log->cur);
259
260 if (cur >= ARRAY_SIZE(log->entry))
261 log->full = 1;
262 e = &log->entry[cur % ARRAY_SIZE(log->entry)];
263 WRITE_ONCE(e->debug_id_done, 0);
264 /*
265 * write-barrier to synchronize access to e->debug_id_done.
266 * We make sure the initialized 0 value is seen before
267 * the other fields are zeroed by memset().
268 */
269 smp_wmb();
270 memset(e, 0, sizeof(*e));
271 return e;
272 }
273
274 struct binder_context {
275 struct binder_node *binder_context_mgr_node;
276 struct mutex context_mgr_node_lock;
277
278 kuid_t binder_context_mgr_uid;
279 const char *name;
280 };
281
282 struct binder_device {
283 struct hlist_node hlist;
284 struct miscdevice miscdev;
285 struct binder_context context;
286 };
287
288 /**
289 * struct binder_work - work enqueued on a worklist
290 * @entry: node enqueued on list
291 * @type: type of work to be performed
292 *
293 * There are separate work lists for proc, thread, and node (async).
294 */
295 struct binder_work {
296 struct list_head entry;
297
298 enum {
299 BINDER_WORK_TRANSACTION = 1,
300 BINDER_WORK_TRANSACTION_COMPLETE,
301 BINDER_WORK_RETURN_ERROR,
302 BINDER_WORK_NODE,
303 BINDER_WORK_DEAD_BINDER,
304 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
305 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
306 } type;
307 };
308
309 struct binder_error {
310 struct binder_work work;
311 uint32_t cmd;
312 };
313
314 /**
315 * struct binder_node - binder node bookkeeping
316 * @debug_id: unique ID for debugging
317 * (invariant after initialized)
318 * @lock: lock for node fields
319 * @work: worklist element for node work
320 * (protected by @proc->inner_lock)
321 * @rb_node: element for proc->nodes tree
322 * (protected by @proc->inner_lock)
323 * @dead_node: element for binder_dead_nodes list
324 * (protected by binder_dead_nodes_lock)
325 * @proc: binder_proc that owns this node
326 * (invariant after initialized)
327 * @refs: list of references on this node
328 * (protected by @lock)
329 * @internal_strong_refs: used to take strong references when
330 * initiating a transaction
331 * (protected by @proc->inner_lock if @proc
332 * and by @lock)
333 * @local_weak_refs: weak user refs from local process
334 * (protected by @proc->inner_lock if @proc
335 * and by @lock)
336 * @local_strong_refs: strong user refs from local process
337 * (protected by @proc->inner_lock if @proc
338 * and by @lock)
339 * @tmp_refs: temporary kernel refs
340 * (protected by @proc->inner_lock while @proc
341 * is valid, and by binder_dead_nodes_lock
342 * if @proc is NULL. During inc/dec and node release
343 * it is also protected by @lock to provide safety
344 * as the node dies and @proc becomes NULL)
345 * @ptr: userspace pointer for node
346 * (invariant, no lock needed)
347 * @cookie: userspace cookie for node
348 * (invariant, no lock needed)
349 * @has_strong_ref: userspace notified of strong ref
350 * (protected by @proc->inner_lock if @proc
351 * and by @lock)
352 * @pending_strong_ref: userspace has acked notification of strong ref
353 * (protected by @proc->inner_lock if @proc
354 * and by @lock)
355 * @has_weak_ref: userspace notified of weak ref
356 * (protected by @proc->inner_lock if @proc
357 * and by @lock)
358 * @pending_weak_ref: userspace has acked notification of weak ref
359 * (protected by @proc->inner_lock if @proc
360 * and by @lock)
361 * @has_async_transaction: async transaction to node in progress
362 * (protected by @lock)
363 * @sched_policy: minimum scheduling policy for node
364 * (invariant after initialized)
365 * @accept_fds: file descriptor operations supported for node
366 * (invariant after initialized)
367 * @min_priority: minimum scheduling priority
368 * (invariant after initialized)
369 * @inherit_rt: inherit RT scheduling policy from caller
370 * (invariant after initialized)
371 * @async_todo: list of async work items
372 * (protected by @proc->inner_lock)
373 *
374 * Bookkeeping structure for binder nodes.
375 */
376 struct binder_node {
377 int debug_id;
378 spinlock_t lock;
379 struct binder_work work;
380 union {
381 struct rb_node rb_node;
382 struct hlist_node dead_node;
383 };
384 struct binder_proc *proc;
385 struct hlist_head refs;
386 int internal_strong_refs;
387 int local_weak_refs;
388 int local_strong_refs;
389 int tmp_refs;
390 binder_uintptr_t ptr;
391 binder_uintptr_t cookie;
392 struct {
393 /*
394 * bitfield elements protected by
395 * proc inner_lock
396 */
397 u8 has_strong_ref:1;
398 u8 pending_strong_ref:1;
399 u8 has_weak_ref:1;
400 u8 pending_weak_ref:1;
401 };
402 struct {
403 /*
404 * invariant after initialization
405 */
406 u8 sched_policy:2;
407 u8 inherit_rt:1;
408 u8 accept_fds:1;
409 u8 min_priority;
410 };
411 bool has_async_transaction;
412 struct list_head async_todo;
413 };
414
415 struct binder_ref_death {
416 /**
417 * @work: worklist element for death notifications
418 * (protected by inner_lock of the proc that
419 * this ref belongs to)
420 */
421 struct binder_work work;
422 binder_uintptr_t cookie;
423 };
424
425 /**
426 * struct binder_ref_data - binder_ref counts and id
427 * @debug_id: unique ID for the ref
428 * @desc: unique userspace handle for ref
429 * @strong: strong ref count (debugging only if not locked)
430 * @weak: weak ref count (debugging only if not locked)
431 *
432 * Structure to hold ref count and ref id information. Since
433 * the actual ref can only be accessed with a lock, this structure
434 * is used to return information about the ref to callers of
435 * ref inc/dec functions.
436 */
437 struct binder_ref_data {
438 int debug_id;
439 uint32_t desc;
440 int strong;
441 int weak;
442 };
443
444 /**
445 * struct binder_ref - struct to track references on nodes
446 * @data: binder_ref_data containing id, handle, and current refcounts
447 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
448 * @rb_node_node: node for lookup by @node in proc's rb_tree
449 * @node_entry: list entry for node->refs list in target node
450 * (protected by @node->lock)
451 * @proc: binder_proc containing ref
452 * @node: binder_node of target node. When cleaning up a
453 * ref for deletion in binder_cleanup_ref, a non-NULL
454 * @node indicates the node must be freed
455 * @death: pointer to death notification (ref_death) if requested
456 * (protected by @node->lock)
457 *
458 * Structure to track references from procA to target node (on procB). This
459 * structure is unsafe to access without holding @proc->outer_lock.
460 */
461 struct binder_ref {
462 /* Lookups needed: */
463 /* node + proc => ref (transaction) */
464 /* desc + proc => ref (transaction, inc/dec ref) */
465 /* node => refs + procs (proc exit) */
466 struct binder_ref_data data;
467 struct rb_node rb_node_desc;
468 struct rb_node rb_node_node;
469 struct hlist_node node_entry;
470 struct binder_proc *proc;
471 struct binder_node *node;
472 struct binder_ref_death *death;
473 };
474
475 enum binder_deferred_state {
476 BINDER_DEFERRED_PUT_FILES = 0x01,
477 BINDER_DEFERRED_FLUSH = 0x02,
478 BINDER_DEFERRED_RELEASE = 0x04,
479 };
480
481 /**
482 * struct binder_priority - scheduler policy and priority
483 * @sched_policy: scheduler policy
484 * @prio: [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
485 *
486 * The binder driver supports inheriting the following scheduler policies:
487 * SCHED_NORMAL
488 * SCHED_BATCH
489 * SCHED_FIFO
490 * SCHED_RR
491 */
492 struct binder_priority {
493 unsigned int sched_policy;
494 int prio;
495 };
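/*
 * Illustrative values: { .sched_policy = SCHED_NORMAL, .prio = NICE_TO_PRIO(0) }
 * describes a default nice-0 CFS task (prio 120), while
 * { .sched_policy = SCHED_FIFO, .prio = 0 } is the highest RT priority.
 */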
496
497 /**
498 * struct binder_proc - binder process bookkeeping
499 * @proc_node: element for binder_procs list
500 * @threads: rbtree of binder_threads in this proc
501 * (protected by @inner_lock)
502 * @nodes: rbtree of binder nodes associated with
503 * this proc ordered by node->ptr
504 * (protected by @inner_lock)
505 * @refs_by_desc: rbtree of refs ordered by ref->desc
506 * (protected by @outer_lock)
507 * @refs_by_node: rbtree of refs ordered by ref->node
508 * (protected by @outer_lock)
509 * @waiting_threads: threads currently waiting for proc work
510 * (protected by @inner_lock)
511 * @pid: PID of group_leader of process
512 * (invariant after initialized)
513 * @tsk: task_struct for group_leader of process
514 * (invariant after initialized)
515 * @files: files_struct for process
516 * (invariant after initialized)
517 * @deferred_work_node: element for binder_deferred_list
518 * (protected by binder_deferred_lock)
519 * @deferred_work: bitmap of deferred work to perform
520 * (protected by binder_deferred_lock)
521 * @is_dead: process is dead and awaiting free
522 * when outstanding transactions are cleaned up
523 * (protected by @inner_lock)
524 * @todo: list of work for this process
525 * (protected by @inner_lock)
526 * @wait: wait queue head to wait for proc work
527 * (invariant after initialized)
528 * @stats: per-process binder statistics
529 * (atomics, no lock needed)
530 * @delivered_death: list of delivered death notifications
531 * (protected by @inner_lock)
532 * @max_threads: cap on number of binder threads
533 * (protected by @inner_lock)
534 * @requested_threads: number of binder threads requested but not
535 * yet started. In current implementation, can
536 * only be 0 or 1.
537 * (protected by @inner_lock)
538 * @requested_threads_started: number of binder threads started
539 * (protected by @inner_lock)
540 * @tmp_ref: temporary reference to indicate proc is in use
541 * (protected by @inner_lock)
542 * @default_priority: default scheduler priority
543 * (invariant after initialized)
544 * @debugfs_entry: debugfs node
545 * @alloc: binder allocator bookkeeping
546 * @context: binder_context for this proc
547 * (invariant after initialized)
548 * @inner_lock: can nest under outer_lock and/or node lock
549 * @outer_lock: no nesting under inner or node lock
550 * Lock order: 1) outer, 2) node, 3) inner
551 *
552 * Bookkeeping structure for binder processes
553 */
554 struct binder_proc {
555 struct hlist_node proc_node;
556 struct rb_root threads;
557 struct rb_root nodes;
558 struct rb_root refs_by_desc;
559 struct rb_root refs_by_node;
560 struct list_head waiting_threads;
561 int pid;
562 struct task_struct *tsk;
563 struct files_struct *files;
564 struct hlist_node deferred_work_node;
565 int deferred_work;
566 bool is_dead;
567
568 struct list_head todo;
569 wait_queue_head_t wait;
570 struct binder_stats stats;
571 struct list_head delivered_death;
572 int max_threads;
573 int requested_threads;
574 int requested_threads_started;
575 int tmp_ref;
576 struct binder_priority default_priority;
577 struct dentry *debugfs_entry;
578 struct binder_alloc alloc;
579 struct binder_context *context;
580 spinlock_t inner_lock;
581 spinlock_t outer_lock;
582 };
583
584 enum {
585 BINDER_LOOPER_STATE_REGISTERED = 0x01,
586 BINDER_LOOPER_STATE_ENTERED = 0x02,
587 BINDER_LOOPER_STATE_EXITED = 0x04,
588 BINDER_LOOPER_STATE_INVALID = 0x08,
589 BINDER_LOOPER_STATE_WAITING = 0x10,
590 BINDER_LOOPER_STATE_POLL = 0x20,
591 };
592
593 /**
594 * struct binder_thread - binder thread bookkeeping
595 * @proc: binder process for this thread
596 * (invariant after initialization)
597 * @rb_node: element for proc->threads rbtree
598 * (protected by @proc->inner_lock)
599 * @waiting_thread_node: element for @proc->waiting_threads list
600 * (protected by @proc->inner_lock)
601 * @pid: PID for this thread
602 * (invariant after initialization)
603 * @looper: bitmap of looping state
604 * (only accessed by this thread)
605 * @looper_needs_return: looping thread needs to exit driver
606 * (no lock needed)
607 * @transaction_stack: stack of in-progress transactions for this thread
608 * (protected by @proc->inner_lock)
609 * @todo: list of work to do for this thread
610 * (protected by @proc->inner_lock)
611 * @process_todo: whether work in @todo should be processed
612 * (protected by @proc->inner_lock)
613 * @return_error: transaction errors reported by this thread
614 * (only accessed by this thread)
615 * @reply_error: transaction errors reported by target thread
616 * (protected by @proc->inner_lock)
617 * @wait: wait queue for thread work
618 * @stats: per-thread statistics
619 * (atomics, no lock needed)
620 * @tmp_ref: temporary reference to indicate thread is in use
621 * (atomic since @proc->inner_lock cannot
622 * always be acquired)
623 * @is_dead: thread is dead and awaiting free
624 * when outstanding transactions are cleaned up
625 * (protected by @proc->inner_lock)
626 * @task: struct task_struct for this thread
627 *
628 * Bookkeeping structure for binder threads.
629 */
630 struct binder_thread {
631 struct binder_proc *proc;
632 struct rb_node rb_node;
633 struct list_head waiting_thread_node;
634 int pid;
635 int looper; /* only modified by this thread */
636 bool looper_need_return; /* can be written by other thread */
637 struct binder_transaction *transaction_stack;
638 struct list_head todo;
639 bool process_todo;
640 struct binder_error return_error;
641 struct binder_error reply_error;
642 wait_queue_head_t wait;
643 struct binder_stats stats;
644 atomic_t tmp_ref;
645 bool is_dead;
646 struct task_struct *task;
647 };
648
649 struct binder_transaction {
650 int debug_id;
651 struct binder_work work;
652 struct binder_thread *from;
653 struct binder_transaction *from_parent;
654 struct binder_proc *to_proc;
655 struct binder_thread *to_thread;
656 struct binder_transaction *to_parent;
657 unsigned need_reply:1;
658 /* unsigned is_dead:1; */ /* not used at the moment */
659
660 struct binder_buffer *buffer;
661 unsigned int code;
662 unsigned int flags;
663 struct binder_priority priority;
664 struct binder_priority saved_priority;
665 bool set_priority_called;
666 kuid_t sender_euid;
667 /**
668 * @lock: protects @from, @to_proc, and @to_thread
669 *
670 * @from, @to_proc, and @to_thread can be set to NULL
671 * during thread teardown
672 */
673 spinlock_t lock;
674 };
675
676 /**
677 * binder_proc_lock() - Acquire outer lock for given binder_proc
678 * @proc: struct binder_proc to acquire
679 *
680 * Acquires proc->outer_lock. Used to protect binder_ref
681 * structures associated with the given proc.
682 */
683 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
684 static void
685 _binder_proc_lock(struct binder_proc *proc, int line)
686 {
687 binder_debug(BINDER_DEBUG_SPINLOCKS,
688 "%s: line=%d\n", __func__, line);
689 spin_lock(&proc->outer_lock);
690 }
691
692 /**
693 * binder_proc_unlock() - Release spinlock for given binder_proc
694 * @proc: struct binder_proc to acquire
695 *
696 * Release lock acquired via binder_proc_lock()
697 */
698 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
699 static void
700 _binder_proc_unlock(struct binder_proc *proc, int line)
701 {
702 binder_debug(BINDER_DEBUG_SPINLOCKS,
703 "%s: line=%d\n", __func__, line);
704 spin_unlock(&proc->outer_lock);
705 }
706
707 /**
708 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
709 * @proc: struct binder_proc to acquire
710 *
711 * Acquires proc->inner_lock. Used to protect todo lists
712 */
713 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
714 static void
715 _binder_inner_proc_lock(struct binder_proc *proc, int line)
716 {
717 binder_debug(BINDER_DEBUG_SPINLOCKS,
718 "%s: line=%d\n", __func__, line);
719 spin_lock(&proc->inner_lock);
720 }
721
722 /**
723 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
724 * @proc: struct binder_proc to acquire
725 *
726 * Release lock acquired via binder_inner_proc_lock()
727 */
728 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
729 static void
730 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
731 {
732 binder_debug(BINDER_DEBUG_SPINLOCKS,
733 "%s: line=%d\n", __func__, line);
734 spin_unlock(&proc->inner_lock);
735 }
736
737 /**
738 * binder_node_lock() - Acquire spinlock for given binder_node
739 * @node: struct binder_node to acquire
740 *
741 * Acquires node->lock. Used to protect binder_node fields
742 */
743 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
744 static void
745 _binder_node_lock(struct binder_node *node, int line)
746 {
747 binder_debug(BINDER_DEBUG_SPINLOCKS,
748 "%s: line=%d\n", __func__, line);
749 spin_lock(&node->lock);
750 }
751
752 /**
753 * binder_node_unlock() - Release spinlock for given binder_node
754 * @node: struct binder_node to acquire
755 *
756 * Release lock acquired via binder_node_lock()
757 */
758 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
759 static void
760 _binder_node_unlock(struct binder_node *node, int line)
761 {
762 binder_debug(BINDER_DEBUG_SPINLOCKS,
763 "%s: line=%d\n", __func__, line);
764 spin_unlock(&node->lock);
765 }
766
767 /**
768 * binder_node_inner_lock() - Acquire node and inner locks
769 * @node: struct binder_node to acquire
770 *
771 * Acquires node->lock. If node->proc is non-NULL, also acquires
772 * proc->inner_lock. Used to protect binder_node fields
773 */
774 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
775 static void
776 _binder_node_inner_lock(struct binder_node *node, int line)
777 {
778 binder_debug(BINDER_DEBUG_SPINLOCKS,
779 "%s: line=%d\n", __func__, line);
780 spin_lock(&node->lock);
781 if (node->proc)
782 binder_inner_proc_lock(node->proc);
783 }
784
785 /**
786 * binder_node_inner_unlock() - Release node and inner locks
787 * @node: struct binder_node to acquire
788 *
789 * Release locks acquired via binder_node_inner_lock()
790 */
791 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
792 static void
793 _binder_node_inner_unlock(struct binder_node *node, int line)
794 {
795 struct binder_proc *proc = node->proc;
796
797 binder_debug(BINDER_DEBUG_SPINLOCKS,
798 "%s: line=%d\n", __func__, line);
799 if (proc)
800 binder_inner_proc_unlock(proc);
801 spin_unlock(&node->lock);
802 }
803
804 static bool binder_worklist_empty_ilocked(struct list_head *list)
805 {
806 return list_empty(list);
807 }
808
809 /**
810 * binder_worklist_empty() - Check if no items on the work list
811 * @proc: binder_proc associated with list
812 * @list: list to check
813 *
814 * Return: true if there are no items on list, else false
815 */
816 static bool binder_worklist_empty(struct binder_proc *proc,
817 struct list_head *list)
818 {
819 bool ret;
820
821 binder_inner_proc_lock(proc);
822 ret = binder_worklist_empty_ilocked(list);
823 binder_inner_proc_unlock(proc);
824 return ret;
825 }
826
827 /**
828 * binder_enqueue_work_ilocked() - Add an item to the work list
829 * @work: struct binder_work to add to list
830 * @target_list: list to add work to
831 *
832 * Adds the work to the specified list. Asserts that work
833 * is not already on a list.
834 *
835 * Requires the proc->inner_lock to be held.
836 */
837 static void
838 binder_enqueue_work_ilocked(struct binder_work *work,
839 struct list_head *target_list)
840 {
841 BUG_ON(target_list == NULL);
842 BUG_ON(work->entry.next && !list_empty(&work->entry));
843 list_add_tail(&work->entry, target_list);
844 }
845
846 /**
847 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
848 * @thread: thread to queue work to
849 * @work: struct binder_work to add to list
850 *
851 * Adds the work to the todo list of the thread. Doesn't set the process_todo
852 * flag, which means that (if it wasn't already set) the thread will go to
853 * sleep without handling this work when it calls read.
854 *
855 * Requires the proc->inner_lock to be held.
856 */
857 static void
858 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
859 struct binder_work *work)
860 {
861 binder_enqueue_work_ilocked(work, &thread->todo);
862 }
863
864 /**
865 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
866 * @thread: thread to queue work to
867 * @work: struct binder_work to add to list
868 *
869 * Adds the work to the todo list of the thread, and enables processing
870 * of the todo queue.
871 *
872 * Requires the proc->inner_lock to be held.
873 */
874 static void
875 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
876 struct binder_work *work)
877 {
878 binder_enqueue_work_ilocked(work, &thread->todo);
879 thread->process_todo = true;
880 }
881
882 /**
883 * binder_enqueue_thread_work() - Add an item to the thread work list
884 * @thread: thread to queue work to
885 * @work: struct binder_work to add to list
886 *
887 * Adds the work to the todo list of the thread, and enables processing
888 * of the todo queue.
889 */
890 static void
891 binder_enqueue_thread_work(struct binder_thread *thread,
892 struct binder_work *work)
893 {
894 binder_inner_proc_lock(thread->proc);
895 binder_enqueue_thread_work_ilocked(thread, work);
896 binder_inner_proc_unlock(thread->proc);
897 }
898
899 static void
900 binder_dequeue_work_ilocked(struct binder_work *work)
901 {
902 list_del_init(&work->entry);
903 }
904
905 /**
906 * binder_dequeue_work() - Removes an item from the work list
907 * @proc: binder_proc associated with list
908 * @work: struct binder_work to remove from list
909 *
910 * Removes the specified work item from whatever list it is on.
911 * Can safely be called if work is not on any list.
912 */
913 static void
914 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
915 {
916 binder_inner_proc_lock(proc);
917 binder_dequeue_work_ilocked(work);
918 binder_inner_proc_unlock(proc);
919 }
920
921 static struct binder_work *binder_dequeue_work_head_ilocked(
922 struct list_head *list)
923 {
924 struct binder_work *w;
925
926 w = list_first_entry_or_null(list, struct binder_work, entry);
927 if (w)
928 list_del_init(&w->entry);
929 return w;
930 }
931
932 /**
933 * binder_dequeue_work_head() - Dequeues the item at head of list
934 * @proc: binder_proc associated with list
935 * @list: list to dequeue head
936 *
937 * Removes the head of the list if there are items on the list
938 *
939 * Return: pointer to the dequeued binder_work, or NULL if the list was empty
940 */
941 static struct binder_work *binder_dequeue_work_head(
942 struct binder_proc *proc,
943 struct list_head *list)
944 {
945 struct binder_work *w;
946
947 binder_inner_proc_lock(proc);
948 w = binder_dequeue_work_head_ilocked(list);
949 binder_inner_proc_unlock(proc);
950 return w;
951 }
952
953 static void
954 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
955 static void binder_free_thread(struct binder_thread *thread);
956 static void binder_free_proc(struct binder_proc *proc);
957 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
958
959 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
960 {
961 struct files_struct *files = proc->files;
962 unsigned long rlim_cur;
963 unsigned long irqs;
964
965 if (files == NULL)
966 return -ESRCH;
967
968 if (!lock_task_sighand(proc->tsk, &irqs))
969 return -EMFILE;
970
971 rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
972 unlock_task_sighand(proc->tsk, &irqs);
973
974 return __alloc_fd(files, 0, rlim_cur, flags);
975 }
976
977 /*
978 * copied from fd_install
979 */
980 static void task_fd_install(
981 struct binder_proc *proc, unsigned int fd, struct file *file)
982 {
983 if (proc->files)
984 __fd_install(proc->files, fd, file);
985 }
986
987 /*
988 * copied from sys_close
989 */
990 static long task_close_fd(struct binder_proc *proc, unsigned int fd)
991 {
992 int retval;
993
994 if (proc->files == NULL)
995 return -ESRCH;
996
997 retval = __close_fd(proc->files, fd);
998 /* can't restart close syscall because file table entry was cleared */
999 if (unlikely(retval == -ERESTARTSYS ||
1000 retval == -ERESTARTNOINTR ||
1001 retval == -ERESTARTNOHAND ||
1002 retval == -ERESTART_RESTARTBLOCK))
1003 retval = -EINTR;
1004
1005 return retval;
1006 }
1007
1008 static bool binder_has_work_ilocked(struct binder_thread *thread,
1009 bool do_proc_work)
1010 {
1011 return thread->process_todo ||
1012 thread->looper_need_return ||
1013 (do_proc_work &&
1014 !binder_worklist_empty_ilocked(&thread->proc->todo));
1015 }
1016
1017 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
1018 {
1019 bool has_work;
1020
1021 binder_inner_proc_lock(thread->proc);
1022 has_work = binder_has_work_ilocked(thread, do_proc_work);
1023 binder_inner_proc_unlock(thread->proc);
1024
1025 return has_work;
1026 }
1027
1028 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
1029 {
1030 return !thread->transaction_stack &&
1031 binder_worklist_empty_ilocked(&thread->todo) &&
1032 (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
1033 BINDER_LOOPER_STATE_REGISTERED));
1034 }
1035
1036 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
1037 bool sync)
1038 {
1039 struct rb_node *n;
1040 struct binder_thread *thread;
1041
1042 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
1043 thread = rb_entry(n, struct binder_thread, rb_node);
1044 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
1045 binder_available_for_proc_work_ilocked(thread)) {
1046 if (sync)
1047 wake_up_interruptible_sync(&thread->wait);
1048 else
1049 wake_up_interruptible(&thread->wait);
1050 }
1051 }
1052 }
1053
1054 /**
1055 * binder_select_thread_ilocked() - selects a thread for doing proc work.
1056 * @proc: process to select a thread from
1057 *
1058 * Note that calling this function moves the thread off the waiting_threads
1059 * list, so it can only be woken up by the caller of this function, or a
1060 * signal. Therefore, callers *should* always wake up the thread this function
1061 * returns.
1062 *
1063 * Return: If there's a thread currently waiting for process work,
1064 * returns that thread. Otherwise returns NULL.
1065 */
1066 static struct binder_thread *
1067 binder_select_thread_ilocked(struct binder_proc *proc)
1068 {
1069 struct binder_thread *thread;
1070
1071 assert_spin_locked(&proc->inner_lock);
1072 thread = list_first_entry_or_null(&proc->waiting_threads,
1073 struct binder_thread,
1074 waiting_thread_node);
1075
1076 if (thread)
1077 list_del_init(&thread->waiting_thread_node);
1078
1079 return thread;
1080 }
1081
1082 /**
1083 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
1084 * @proc: process to wake up a thread in
1085 * @thread: specific thread to wake-up (may be NULL)
1086 * @sync: whether to do a synchronous wake-up
1087 *
1088 * This function wakes up a thread in the @proc process.
1089 * The caller may provide a specific thread to wake-up in
1090 * the @thread parameter. If @thread is NULL, this function
1091 * will wake up threads that have called poll().
1092 *
1093 * Note that for this function to work as expected, callers
1094 * should first call binder_select_thread() to find a thread
1095 * to handle the work (if they don't have a thread already),
1096 * and pass the result into the @thread parameter.
1097 */
1098 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
1099 struct binder_thread *thread,
1100 bool sync)
1101 {
1102 assert_spin_locked(&proc->inner_lock);
1103
1104 if (thread) {
1105 if (sync)
1106 wake_up_interruptible_sync(&thread->wait);
1107 else
1108 wake_up_interruptible(&thread->wait);
1109 return;
1110 }
1111
1112 /* Didn't find a thread waiting for proc work; this can happen
1113 * in two scenarios:
1114 * 1. All threads are busy handling transactions
1115 * In that case, one of those threads should call back into
1116 * the kernel driver soon and pick up this work.
1117 * 2. Threads are using the (e)poll interface, in which case
1118 * they may be blocked on the waitqueue without having been
1119 * added to waiting_threads. For this case, we just iterate
1120 * over all threads not handling transaction work, and
1121 * wake them all up. We wake all because we don't know whether
1122 * a thread that called into (e)poll is handling non-binder
1123 * work currently.
1124 */
1125 binder_wakeup_poll_threads_ilocked(proc, sync);
1126 }
1127
1128 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1129 {
1130 struct binder_thread *thread = binder_select_thread_ilocked(proc);
1131
1132 binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
1133 }
1134
1135 static bool is_rt_policy(int policy)
1136 {
1137 return policy == SCHED_FIFO || policy == SCHED_RR;
1138 }
1139
1140 static bool is_fair_policy(int policy)
1141 {
1142 return policy == SCHED_NORMAL || policy == SCHED_BATCH;
1143 }
1144
1145 static bool binder_supported_policy(int policy)
1146 {
1147 return is_fair_policy(policy) || is_rt_policy(policy);
1148 }
1149
1150 static int to_userspace_prio(int policy, int kernel_priority)
1151 {
1152 if (is_fair_policy(policy))
1153 return PRIO_TO_NICE(kernel_priority);
1154 else
1155 return MAX_USER_RT_PRIO - 1 - kernel_priority;
1156 }
1157
1158 static int to_kernel_prio(int policy, int user_priority)
1159 {
1160 if (is_fair_policy(policy))
1161 return NICE_TO_PRIO(user_priority);
1162 else
1163 return MAX_USER_RT_PRIO - 1 - user_priority;
1164 }
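/*
 * Illustrative mappings, assuming MAX_USER_RT_PRIO == 100 as in mainline:
 * SCHED_NORMAL nice 10 <-> kernel prio 130 (NICE_TO_PRIO()/PRIO_TO_NICE()),
 * SCHED_FIFO rtprio 99 <-> kernel prio 0 (the highest RT priority).
 */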
1165
1166 static void binder_do_set_priority(struct task_struct *task,
1167 struct binder_priority desired,
1168 bool verify)
1169 {
1170 int priority; /* user-space prio value */
1171 bool has_cap_nice;
1172 unsigned int policy = desired.sched_policy;
1173
1174 if (task->policy == policy && task->normal_prio == desired.prio)
1175 return;
1176
1177 has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);
1178
1179 priority = to_userspace_prio(policy, desired.prio);
1180
1181 if (verify && is_rt_policy(policy) && !has_cap_nice) {
1182 long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);
1183
1184 if (max_rtprio == 0) {
1185 policy = SCHED_NORMAL;
1186 priority = MIN_NICE;
1187 } else if (priority > max_rtprio) {
1188 priority = max_rtprio;
1189 }
1190 }
1191
1192 if (verify && is_fair_policy(policy) && !has_cap_nice) {
1193 long min_nice = (MAX_NICE - task_rlimit(task, RLIMIT_NICE) + 1);
1194
1195 if (min_nice > MAX_NICE) {
1196 binder_user_error("%d RLIMIT_NICE not set\n",
1197 task->pid);
1198 return;
1199 } else if (priority < min_nice) {
1200 priority = min_nice;
1201 }
1202 }
1203
1204 if (policy != desired.sched_policy ||
1205 to_kernel_prio(policy, priority) != desired.prio)
1206 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
1207 "%d: priority %d not allowed, using %d instead\n",
1208 task->pid, desired.prio,
1209 to_kernel_prio(policy, priority));
1210
1211 trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
1212 to_kernel_prio(policy, priority),
1213 desired.prio);
1214
1215 /* Set the actual priority */
1216 if (task->policy != policy || is_rt_policy(policy)) {
1217 struct sched_param params;
1218
1219 params.sched_priority = is_rt_policy(policy) ? priority : 0;
1220
1221 sched_setscheduler_nocheck(task,
1222 policy | SCHED_RESET_ON_FORK,
1223 &params);
1224 }
1225 if (is_fair_policy(policy))
1226 set_user_nice(task, priority);
1227 }
1228
1229 static void binder_set_priority(struct task_struct *task,
1230 struct binder_priority desired)
1231 {
1232 binder_do_set_priority(task, desired, /* verify = */ true);
1233 }
1234
1235 static void binder_restore_priority(struct task_struct *task,
1236 struct binder_priority desired)
1237 {
1238 binder_do_set_priority(task, desired, /* verify = */ false);
1239 }
1240
1241 static void binder_transaction_priority(struct task_struct *task,
1242 struct binder_transaction *t,
1243 struct binder_priority node_prio,
1244 bool inherit_rt)
1245 {
1246 struct binder_priority desired_prio = t->priority;
1247
1248 if (t->set_priority_called)
1249 return;
1250
1251 t->set_priority_called = true;
1252 t->saved_priority.sched_policy = task->policy;
1253 t->saved_priority.prio = task->normal_prio;
1254
1255 if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
1256 desired_prio.prio = NICE_TO_PRIO(0);
1257 desired_prio.sched_policy = SCHED_NORMAL;
1258 }
1259
1260 if (node_prio.prio < t->priority.prio ||
1261 (node_prio.prio == t->priority.prio &&
1262 node_prio.sched_policy == SCHED_FIFO)) {
1263 /*
1264 * In case the minimum priority on the node is
1265 * higher (lower value), use that priority. If
1266 * the priority is the same, but the node uses
1267 * SCHED_FIFO, prefer SCHED_FIFO, since it can
1268 * run unbounded, unlike SCHED_RR.
1269 */
1270 desired_prio = node_prio;
1271 }
1272
1273 binder_set_priority(task, desired_prio);
1274 }
1275
1276 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1277 binder_uintptr_t ptr)
1278 {
1279 struct rb_node *n = proc->nodes.rb_node;
1280 struct binder_node *node;
1281
1282 assert_spin_locked(&proc->inner_lock);
1283
1284 while (n) {
1285 node = rb_entry(n, struct binder_node, rb_node);
1286
1287 if (ptr < node->ptr)
1288 n = n->rb_left;
1289 else if (ptr > node->ptr)
1290 n = n->rb_right;
1291 else {
1292 /*
1293 * take an implicit weak reference
1294 * to ensure node stays alive until
1295 * call to binder_put_node()
1296 */
1297 binder_inc_node_tmpref_ilocked(node);
1298 return node;
1299 }
1300 }
1301 return NULL;
1302 }
1303
1304 static struct binder_node *binder_get_node(struct binder_proc *proc,
1305 binder_uintptr_t ptr)
1306 {
1307 struct binder_node *node;
1308
1309 binder_inner_proc_lock(proc);
1310 node = binder_get_node_ilocked(proc, ptr);
1311 binder_inner_proc_unlock(proc);
1312 return node;
1313 }
1314
1315 static struct binder_node *binder_init_node_ilocked(
1316 struct binder_proc *proc,
1317 struct binder_node *new_node,
1318 struct flat_binder_object *fp)
1319 {
1320 struct rb_node **p = &proc->nodes.rb_node;
1321 struct rb_node *parent = NULL;
1322 struct binder_node *node;
1323 binder_uintptr_t ptr = fp ? fp->binder : 0;
1324 binder_uintptr_t cookie = fp ? fp->cookie : 0;
1325 __u32 flags = fp ? fp->flags : 0;
1326 s8 priority;
1327
1328 assert_spin_locked(&proc->inner_lock);
1329
1330 while (*p) {
1331
1332 parent = *p;
1333 node = rb_entry(parent, struct binder_node, rb_node);
1334
1335 if (ptr < node->ptr)
1336 p = &(*p)->rb_left;
1337 else if (ptr > node->ptr)
1338 p = &(*p)->rb_right;
1339 else {
1340 /*
1341 * A matching node is already in
1342 * the rb tree. Abandon the init
1343 * and return it.
1344 */
1345 binder_inc_node_tmpref_ilocked(node);
1346 return node;
1347 }
1348 }
1349 node = new_node;
1350 binder_stats_created(BINDER_STAT_NODE);
1351 node->tmp_refs++;
1352 rb_link_node(&node->rb_node, parent, p);
1353 rb_insert_color(&node->rb_node, &proc->nodes);
1354 node->debug_id = atomic_inc_return(&binder_last_id);
1355 node->proc = proc;
1356 node->ptr = ptr;
1357 node->cookie = cookie;
1358 node->work.type = BINDER_WORK_NODE;
1359 priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1360 node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
1361 FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
1362 node->min_priority = to_kernel_prio(node->sched_policy, priority);
1363 node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1364 node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
1365 spin_lock_init(&node->lock);
1366 INIT_LIST_HEAD(&node->work.entry);
1367 INIT_LIST_HEAD(&node->async_todo);
1368 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1369 "%d:%d node %d u%016llx c%016llx created\n",
1370 proc->pid, current->pid, node->debug_id,
1371 (u64)node->ptr, (u64)node->cookie);
1372
1373 return node;
1374 }
1375
1376 static struct binder_node *binder_new_node(struct binder_proc *proc,
1377 struct flat_binder_object *fp)
1378 {
1379 struct binder_node *node;
1380 struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1381
1382 if (!new_node)
1383 return NULL;
1384 binder_inner_proc_lock(proc);
1385 node = binder_init_node_ilocked(proc, new_node, fp);
1386 binder_inner_proc_unlock(proc);
1387 if (node != new_node)
1388 /*
1389 * The node was already added by another thread
1390 */
1391 kfree(new_node);
1392
1393 return node;
1394 }
1395
1396 static void binder_free_node(struct binder_node *node)
1397 {
1398 kfree(node);
1399 binder_stats_deleted(BINDER_STAT_NODE);
1400 }
1401
1402 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1403 int internal,
1404 struct list_head *target_list)
1405 {
1406 struct binder_proc *proc = node->proc;
1407
1408 assert_spin_locked(&node->lock);
1409 if (proc)
1410 assert_spin_locked(&proc->inner_lock);
1411 if (strong) {
1412 if (internal) {
1413 if (target_list == NULL &&
1414 node->internal_strong_refs == 0 &&
1415 !(node->proc &&
1416 node == node->proc->context->
1417 binder_context_mgr_node &&
1418 node->has_strong_ref)) {
1419 pr_err("invalid inc strong node for %d\n",
1420 node->debug_id);
1421 return -EINVAL;
1422 }
1423 node->internal_strong_refs++;
1424 } else
1425 node->local_strong_refs++;
1426 if (!node->has_strong_ref && target_list) {
1427 binder_dequeue_work_ilocked(&node->work);
1428 /*
1429 * Note: this function is the only place where we queue
1430 * directly to a thread->todo without using the
1431 * corresponding binder_enqueue_thread_work() helper
1432 * functions; in this case it's ok to not set the
1433 * process_todo flag, since we know this node work will
1434 * always be followed by other work that starts queue
1435 * processing: in case of synchronous transactions, a
1436 * BR_REPLY or BR_ERROR; in case of oneway
1437 * transactions, a BR_TRANSACTION_COMPLETE.
1438 */
1439 binder_enqueue_work_ilocked(&node->work, target_list);
1440 }
1441 } else {
1442 if (!internal)
1443 node->local_weak_refs++;
1444 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1445 if (target_list == NULL) {
1446 pr_err("invalid inc weak node for %d\n",
1447 node->debug_id);
1448 return -EINVAL;
1449 }
1450 /*
1451 * See comment above
1452 */
1453 binder_enqueue_work_ilocked(&node->work, target_list);
1454 }
1455 }
1456 return 0;
1457 }
1458
1459 static int binder_inc_node(struct binder_node *node, int strong, int internal,
1460 struct list_head *target_list)
1461 {
1462 int ret;
1463
1464 binder_node_inner_lock(node);
1465 ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1466 binder_node_inner_unlock(node);
1467
1468 return ret;
1469 }
1470
1471 static bool binder_dec_node_nilocked(struct binder_node *node,
1472 int strong, int internal)
1473 {
1474 struct binder_proc *proc = node->proc;
1475
1476 assert_spin_locked(&node->lock);
1477 if (proc)
1478 assert_spin_locked(&proc->inner_lock);
1479 if (strong) {
1480 if (internal)
1481 node->internal_strong_refs--;
1482 else
1483 node->local_strong_refs--;
1484 if (node->local_strong_refs || node->internal_strong_refs)
1485 return false;
1486 } else {
1487 if (!internal)
1488 node->local_weak_refs--;
1489 if (node->local_weak_refs || node->tmp_refs ||
1490 !hlist_empty(&node->refs))
1491 return false;
1492 }
1493
1494 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1495 if (list_empty(&node->work.entry)) {
1496 binder_enqueue_work_ilocked(&node->work, &proc->todo);
1497 binder_wakeup_proc_ilocked(proc);
1498 }
1499 } else {
1500 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1501 !node->local_weak_refs && !node->tmp_refs) {
1502 if (proc) {
1503 binder_dequeue_work_ilocked(&node->work);
1504 rb_erase(&node->rb_node, &proc->nodes);
1505 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1506 "refless node %d deleted\n",
1507 node->debug_id);
1508 } else {
1509 BUG_ON(!list_empty(&node->work.entry));
1510 spin_lock(&binder_dead_nodes_lock);
1511 /*
1512 * tmp_refs could have changed so
1513 * check it again
1514 */
1515 if (node->tmp_refs) {
1516 spin_unlock(&binder_dead_nodes_lock);
1517 return false;
1518 }
1519 hlist_del(&node->dead_node);
1520 spin_unlock(&binder_dead_nodes_lock);
1521 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1522 "dead node %d deleted\n",
1523 node->debug_id);
1524 }
1525 return true;
1526 }
1527 }
1528 return false;
1529 }
1530
1531 static void binder_dec_node(struct binder_node *node, int strong, int internal)
1532 {
1533 bool free_node;
1534
1535 binder_node_inner_lock(node);
1536 free_node = binder_dec_node_nilocked(node, strong, internal);
1537 binder_node_inner_unlock(node);
1538 if (free_node)
1539 binder_free_node(node);
1540 }
1541
1542 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
1543 {
1544 /*
1545 * No call to binder_inc_node() is needed since we
1546 * don't need to inform userspace of any changes to
1547 * tmp_refs
1548 */
1549 node->tmp_refs++;
1550 }
1551
1552 /**
1553 * binder_inc_node_tmpref() - take a temporary reference on node
1554 * @node: node to reference
1555 *
1556 * Take reference on node to prevent the node from being freed
1557 * while referenced only by a local variable. The inner lock is
1558 * needed to serialize with the node work on the queue (which
1559 * isn't needed after the node is dead). If the node is dead
1560 * (node->proc is NULL), use binder_dead_nodes_lock to protect
1561 * node->tmp_refs against dead-node-only cases where the node
1562 * lock cannot be acquired (e.g. traversing the dead node list to
1563 * print nodes)
1564 */
1565 static void binder_inc_node_tmpref(struct binder_node *node)
1566 {
1567 binder_node_lock(node);
1568 if (node->proc)
1569 binder_inner_proc_lock(node->proc);
1570 else
1571 spin_lock(&binder_dead_nodes_lock);
1572 binder_inc_node_tmpref_ilocked(node);
1573 if (node->proc)
1574 binder_inner_proc_unlock(node->proc);
1575 else
1576 spin_unlock(&binder_dead_nodes_lock);
1577 binder_node_unlock(node);
1578 }
1579
1580 /**
1581 * binder_dec_node_tmpref() - remove a temporary reference on node
1582 * @node: node to reference
1583 *
1584 * Release temporary reference on node taken via binder_inc_node_tmpref()
1585 */
1586 static void binder_dec_node_tmpref(struct binder_node *node)
1587 {
1588 bool free_node;
1589
1590 binder_node_inner_lock(node);
1591 if (!node->proc)
1592 spin_lock(&binder_dead_nodes_lock);
1593 node->tmp_refs--;
1594 BUG_ON(node->tmp_refs < 0);
1595 if (!node->proc)
1596 spin_unlock(&binder_dead_nodes_lock);
1597 /*
1598 * Call binder_dec_node() to check if all refcounts are 0
1599 * and cleanup is needed. Calling with strong=0 and internal=1
1600 * causes no actual reference to be released in binder_dec_node().
1601 * If that changes, a change is needed here too.
1602 */
1603 free_node = binder_dec_node_nilocked(node, 0, 1);
1604 binder_node_inner_unlock(node);
1605 if (free_node)
1606 binder_free_node(node);
1607 }
1608
1609 static void binder_put_node(struct binder_node *node)
1610 {
1611 binder_dec_node_tmpref(node);
1612 }
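/*
 * Typical usage sketch: binder_get_node() returns the node with a
 * temporary reference already taken, so every successful lookup must be
 * paired with binder_put_node() once the caller is done with the node:
 *
 *	node = binder_get_node(proc, ptr);
 *	if (node) {
 *		... use node ...
 *		binder_put_node(node);
 *	}
 */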
1613
1614 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1615 u32 desc, bool need_strong_ref)
1616 {
1617 struct rb_node *n = proc->refs_by_desc.rb_node;
1618 struct binder_ref *ref;
1619
1620 while (n) {
1621 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1622
1623 if (desc < ref->data.desc) {
1624 n = n->rb_left;
1625 } else if (desc > ref->data.desc) {
1626 n = n->rb_right;
1627 } else if (need_strong_ref && !ref->data.strong) {
1628 binder_user_error("tried to use weak ref as strong ref\n");
1629 return NULL;
1630 } else {
1631 return ref;
1632 }
1633 }
1634 return NULL;
1635 }
1636
1637 /**
1638 * binder_get_ref_for_node_olocked() - get the ref associated with given node
1639 * @proc: binder_proc that owns the ref
1640 * @node: binder_node of target
1641 * @new_ref: newly allocated binder_ref to be initialized or %NULL
1642 *
1643 * Look up the ref for the given node and return it if it exists
1644 *
1645 * If it doesn't exist and the caller provides a newly allocated
1646 * ref, initialize the fields of the newly allocated ref and insert
1647 * into the given proc rb_trees and node refs list.
1648 *
1649 * Return: the ref for node. It is possible that another thread
1650 * allocated/initialized the ref first in which case the
1651 * returned ref would be different than the passed-in
1652 * new_ref. new_ref must be kfree'd by the caller in
1653 * this case.
1654 */
1655 static struct binder_ref *binder_get_ref_for_node_olocked(
1656 struct binder_proc *proc,
1657 struct binder_node *node,
1658 struct binder_ref *new_ref)
1659 {
1660 struct binder_context *context = proc->context;
1661 struct rb_node **p = &proc->refs_by_node.rb_node;
1662 struct rb_node *parent = NULL;
1663 struct binder_ref *ref;
1664 struct rb_node *n;
1665
1666 while (*p) {
1667 parent = *p;
1668 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1669
1670 if (node < ref->node)
1671 p = &(*p)->rb_left;
1672 else if (node > ref->node)
1673 p = &(*p)->rb_right;
1674 else
1675 return ref;
1676 }
1677 if (!new_ref)
1678 return NULL;
1679
1680 binder_stats_created(BINDER_STAT_REF);
1681 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1682 new_ref->proc = proc;
1683 new_ref->node = node;
1684 rb_link_node(&new_ref->rb_node_node, parent, p);
1685 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1686
1687 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1688 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1689 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1690 if (ref->data.desc > new_ref->data.desc)
1691 break;
1692 new_ref->data.desc = ref->data.desc + 1;
1693 }
1694
1695 p = &proc->refs_by_desc.rb_node;
1696 while (*p) {
1697 parent = *p;
1698 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1699
1700 if (new_ref->data.desc < ref->data.desc)
1701 p = &(*p)->rb_left;
1702 else if (new_ref->data.desc > ref->data.desc)
1703 p = &(*p)->rb_right;
1704 else
1705 BUG();
1706 }
1707 rb_link_node(&new_ref->rb_node_desc, parent, p);
1708 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1709
1710 binder_node_lock(node);
1711 hlist_add_head(&new_ref->node_entry, &node->refs);
1712
1713 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1714 "%d new ref %d desc %d for node %d\n",
1715 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1716 node->debug_id);
1717 binder_node_unlock(node);
1718 return new_ref;
1719 }
1720
1721 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1722 {
1723 bool delete_node = false;
1724
1725 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1726 "%d delete ref %d desc %d for node %d\n",
1727 ref->proc->pid, ref->data.debug_id, ref->data.desc,
1728 ref->node->debug_id);
1729
1730 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1731 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1732
1733 binder_node_inner_lock(ref->node);
1734 if (ref->data.strong)
1735 binder_dec_node_nilocked(ref->node, 1, 1);
1736
1737 hlist_del(&ref->node_entry);
1738 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1739 binder_node_inner_unlock(ref->node);
1740 /*
1741 * Clear ref->node unless we want the caller to free the node
1742 */
1743 if (!delete_node) {
1744 /*
1745 * The caller uses ref->node to determine
1746 * whether the node needs to be freed. Clear
1747 * it since the node is still alive.
1748 */
1749 ref->node = NULL;
1750 }
1751
1752 if (ref->death) {
1753 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1754 "%d delete ref %d desc %d has death notification\n",
1755 ref->proc->pid, ref->data.debug_id,
1756 ref->data.desc);
1757 binder_dequeue_work(ref->proc, &ref->death->work);
1758 binder_stats_deleted(BINDER_STAT_DEATH);
1759 }
1760 binder_stats_deleted(BINDER_STAT_REF);
1761 }
1762
1763 /**
1764 * binder_inc_ref_olocked() - increment the ref for given handle
1765 * @ref: ref to be incremented
1766 * @strong: if true, strong increment, else weak
1767 * @target_list: list to queue node work on
1768 *
1769 * Increment the ref. @ref->proc->outer_lock must be held on entry
1770 *
1771 * Return: 0, if successful, else errno
1772 */
1773 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1774 struct list_head *target_list)
1775 {
1776 int ret;
1777
1778 if (strong) {
1779 if (ref->data.strong == 0) {
1780 ret = binder_inc_node(ref->node, 1, 1, target_list);
1781 if (ret)
1782 return ret;
1783 }
1784 ref->data.strong++;
1785 } else {
1786 if (ref->data.weak == 0) {
1787 ret = binder_inc_node(ref->node, 0, 1, target_list);
1788 if (ret)
1789 return ret;
1790 }
1791 ref->data.weak++;
1792 }
1793 return 0;
1794 }
1795
1796 /**
1797 * binder_dec_ref_olocked() - dec the ref for given handle
1798 * @ref: ref to be decremented
1799 * @strong: if true, strong decrement, else weak
1800 *
1801 * Decrement the ref. @ref->proc->outer_lock must be held on entry.
1802 *
1803 * Return: true if ref is cleaned up and ready to be freed
1804 */
1805 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1806 {
1807 if (strong) {
1808 if (ref->data.strong == 0) {
1809 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1810 ref->proc->pid, ref->data.debug_id,
1811 ref->data.desc, ref->data.strong,
1812 ref->data.weak);
1813 return false;
1814 }
1815 ref->data.strong--;
1816 if (ref->data.strong == 0)
1817 binder_dec_node(ref->node, strong, 1);
1818 } else {
1819 if (ref->data.weak == 0) {
1820 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1821 ref->proc->pid, ref->data.debug_id,
1822 ref->data.desc, ref->data.strong,
1823 ref->data.weak);
1824 return false;
1825 }
1826 ref->data.weak--;
1827 }
1828 if (ref->data.strong == 0 && ref->data.weak == 0) {
1829 binder_cleanup_ref_olocked(ref);
1830 return true;
1831 }
1832 return false;
1833 }
1834
1835 /**
1836 * binder_get_node_from_ref() - get the node from the given proc/desc
1837 * @proc: proc containing the ref
1838 * @desc: the handle associated with the ref
1839 * @need_strong_ref: if true, only return node if ref is strong
1840 * @rdata: the id/refcount data for the ref
1841 *
1842 * Given a proc and ref handle, return the associated binder_node
1843 *
1844 * Return: a binder_node or NULL if not found or not strong when strong required
1845 */
1846 static struct binder_node *binder_get_node_from_ref(
1847 struct binder_proc *proc,
1848 u32 desc, bool need_strong_ref,
1849 struct binder_ref_data *rdata)
1850 {
1851 struct binder_node *node;
1852 struct binder_ref *ref;
1853
1854 binder_proc_lock(proc);
1855 ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1856 if (!ref)
1857 goto err_no_ref;
1858 node = ref->node;
1859 /*
1860 * Take an implicit reference on the node to ensure
1861 * it stays alive until the call to binder_put_node()
1862 */
1863 binder_inc_node_tmpref(node);
1864 if (rdata)
1865 *rdata = ref->data;
1866 binder_proc_unlock(proc);
1867
1868 return node;
1869
1870 err_no_ref:
1871 binder_proc_unlock(proc);
1872 return NULL;
1873 }
1874
1875 /**
1876 * binder_free_ref() - free the binder_ref
1877 * @ref: ref to free
1878 *
1879 * Free the binder_ref. Free the binder_node indicated by ref->node
1880 * (if non-NULL) and the binder_ref_death indicated by ref->death.
1881 */
1882 static void binder_free_ref(struct binder_ref *ref)
1883 {
1884 if (ref->node)
1885 binder_free_node(ref->node);
1886 kfree(ref->death);
1887 kfree(ref);
1888 }
1889
1890 /**
1891 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1892 * @proc: proc containing the ref
1893 * @desc: the handle associated with the ref
1894 * @increment: true=inc reference, false=dec reference
1895 * @strong: true=strong reference, false=weak reference
1896 * @rdata: the id/refcount data for the ref
1897 *
1898 * Given a proc and ref handle, increment or decrement the ref
1899 * according to "increment" arg.
1900 *
1901 * Return: 0 if successful, else errno
1902 */
1903 static int binder_update_ref_for_handle(struct binder_proc *proc,
1904 uint32_t desc, bool increment, bool strong,
1905 struct binder_ref_data *rdata)
1906 {
1907 int ret = 0;
1908 struct binder_ref *ref;
1909 bool delete_ref = false;
1910
1911 binder_proc_lock(proc);
1912 ref = binder_get_ref_olocked(proc, desc, strong);
1913 if (!ref) {
1914 ret = -EINVAL;
1915 goto err_no_ref;
1916 }
1917 if (increment)
1918 ret = binder_inc_ref_olocked(ref, strong, NULL);
1919 else
1920 delete_ref = binder_dec_ref_olocked(ref, strong);
1921
1922 if (rdata)
1923 *rdata = ref->data;
1924 binder_proc_unlock(proc);
1925
1926 if (delete_ref)
1927 binder_free_ref(ref);
1928 return ret;
1929
1930 err_no_ref:
1931 binder_proc_unlock(proc);
1932 return ret;
1933 }
1934
1935 /**
1936 * binder_dec_ref_for_handle() - dec the ref for given handle
1937 * @proc: proc containing the ref
1938 * @desc: the handle associated with the ref
1939 * @strong: true=strong reference, false=weak reference
1940 * @rdata: the id/refcount data for the ref
1941 *
1942 * Just calls binder_update_ref_for_handle() to decrement the ref.
1943 *
1944 * Return: 0 if successful, else errno
1945 */
1946 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1947 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1948 {
1949 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1950 }
1951
1952
1953 /**
1954 * binder_inc_ref_for_node() - increment the ref for given proc/node
1955 * @proc: proc containing the ref
1956 * @node: target node
1957 * @strong: true=strong reference, false=weak reference
1958 * @target_list: worklist to use if node is incremented
1959 * @rdata: the id/refcount data for the ref
1960 *
1961 * Given a proc and node, increment the ref. Create the ref if it
1962 * doesn't already exist
1963 *
1964 * Return: 0 if successful, else errno
1965 */
1966 static int binder_inc_ref_for_node(struct binder_proc *proc,
1967 struct binder_node *node,
1968 bool strong,
1969 struct list_head *target_list,
1970 struct binder_ref_data *rdata)
1971 {
1972 struct binder_ref *ref;
1973 struct binder_ref *new_ref = NULL;
1974 int ret = 0;
1975
1976 binder_proc_lock(proc);
1977 ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1978 if (!ref) {
1979 binder_proc_unlock(proc);
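/*
 * GFP_KERNEL allocation may sleep, so the proc spinlock is dropped
 * first; the lookup is retried below once the lock is reacquired, in
 * case another thread created the ref in the meantime.
 */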
1980 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1981 if (!new_ref)
1982 return -ENOMEM;
1983 binder_proc_lock(proc);
1984 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1985 }
1986 ret = binder_inc_ref_olocked(ref, strong, target_list);
1987 *rdata = ref->data;
1988 binder_proc_unlock(proc);
1989 if (new_ref && ref != new_ref)
1990 /*
1991 * Another thread created the ref first so
1992 * free the one we allocated
1993 */
1994 kfree(new_ref);
1995 return ret;
1996 }
1997
1998 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1999 struct binder_transaction *t)
2000 {
2001 BUG_ON(!target_thread);
2002 assert_spin_locked(&target_thread->proc->inner_lock);
2003 BUG_ON(target_thread->transaction_stack != t);
2004 BUG_ON(target_thread->transaction_stack->from != target_thread);
2005 target_thread->transaction_stack =
2006 target_thread->transaction_stack->from_parent;
2007 t->from = NULL;
2008 }
2009
2010 /**
2011 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
2012 * @thread: thread to decrement
2013 *
2014 * A thread needs to be kept alive while being used to create or
2015 * handle a transaction. binder_get_txn_from() is used to safely
2016 * extract t->from from a binder_transaction and keep the thread
2017 * indicated by t->from from being freed. When done with that
2018 * binder_thread, this function is called to decrement the
2019 * tmp_ref and free if appropriate (thread has been released
2020 * and no transaction being processed by the driver)
2021 */
2022 static void binder_thread_dec_tmpref(struct binder_thread *thread)
2023 {
2024 /*
2025 * The atomic protects the counter value itself; the inner lock
2026 * serializes the final is_dead/zero check against thread release
2027 */
2028 binder_inner_proc_lock(thread->proc);
2029 atomic_dec(&thread->tmp_ref);
2030 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
2031 binder_inner_proc_unlock(thread->proc);
2032 binder_free_thread(thread);
2033 return;
2034 }
2035 binder_inner_proc_unlock(thread->proc);
2036 }
2037
2038 /**
2039 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
2040 * @proc: proc to decrement
2041 *
2042 * A binder_proc needs to be kept alive while being used to create or
2043 * handle a transaction. proc->tmp_ref is incremented when
2044 * creating a new transaction or the binder_proc is currently in-use
2045 * by threads that are being released. When done with the binder_proc,
2046 * this function is called to decrement the counter and free the
2047 * proc if appropriate (proc has been released, all threads have
2048 * been released and not currently in use to process a transaction).
2049 */
2050 static void binder_proc_dec_tmpref(struct binder_proc *proc)
2051 {
2052 binder_inner_proc_lock(proc);
2053 proc->tmp_ref--;
2054 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
2055 !proc->tmp_ref) {
2056 binder_inner_proc_unlock(proc);
2057 binder_free_proc(proc);
2058 return;
2059 }
2060 binder_inner_proc_unlock(proc);
2061 }
2062
2063 /**
2064 * binder_get_txn_from() - safely extract the "from" thread in transaction
2065 * @t: binder transaction for t->from
2066 *
2067 * Atomically return the "from" thread and increment the tmp_ref
2068 * count for the thread to ensure it stays alive until
2069 * binder_thread_dec_tmpref() is called.
2070 *
2071 * Return: the value of t->from
2072 */
2073 static struct binder_thread *binder_get_txn_from(
2074 struct binder_transaction *t)
2075 {
2076 struct binder_thread *from;
2077
2078 spin_lock(&t->lock);
2079 from = t->from;
2080 if (from)
2081 atomic_inc(&from->tmp_ref);
2082 spin_unlock(&t->lock);
2083 return from;
2084 }
2085
2086 /**
2087 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
2088 * @t: binder transaction for t->from
2089 *
2090 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
2091 * to guarantee that the thread cannot be released while operating on it.
2092 * The caller must call binder_inner_proc_unlock() to release the inner lock
2093 * as well as call binder_thread_dec_tmpref() to release the reference.
2094 *
2095 * Return: the value of t->from
2096 */
2097 static struct binder_thread *binder_get_txn_from_and_acq_inner(
2098 struct binder_transaction *t)
2099 {
2100 struct binder_thread *from;
2101
2102 from = binder_get_txn_from(t);
2103 if (!from)
2104 return NULL;
2105 binder_inner_proc_lock(from->proc);
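/*
 * Re-check t->from now that the inner lock is held: it is cleared
 * once the transaction has been popped from the sender's stack, in
 * which case the temporary reference taken above must be dropped.
 */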
2106 if (t->from) {
2107 BUG_ON(from != t->from);
2108 return from;
2109 }
2110 binder_inner_proc_unlock(from->proc);
2111 binder_thread_dec_tmpref(from);
2112 return NULL;
2113 }
2114
2115 static void binder_free_transaction(struct binder_transaction *t)
2116 {
2117 if (t->buffer)
2118 t->buffer->transaction = NULL;
2119 kfree(t);
2120 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2121 }
2122
2123 static void binder_send_failed_reply(struct binder_transaction *t,
2124 uint32_t error_code)
2125 {
2126 struct binder_thread *target_thread;
2127 struct binder_transaction *next;
2128
2129 BUG_ON(t->flags & TF_ONE_WAY);
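/*
 * Walk up the from_parent chain and deliver the error to the first
 * sender thread that is still alive, freeing each transaction whose
 * sender is already gone along the way.
 */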
2130 while (1) {
2131 target_thread = binder_get_txn_from_and_acq_inner(t);
2132 if (target_thread) {
2133 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2134 "send failed reply for transaction %d to %d:%d\n",
2135 t->debug_id,
2136 target_thread->proc->pid,
2137 target_thread->pid);
2138
2139 binder_pop_transaction_ilocked(target_thread, t);
2140 if (target_thread->reply_error.cmd == BR_OK) {
2141 target_thread->reply_error.cmd = error_code;
2142 binder_enqueue_thread_work_ilocked(
2143 target_thread,
2144 &target_thread->reply_error.work);
2145 wake_up_interruptible(&target_thread->wait);
2146 } else {
2147 WARN(1, "Unexpected reply error: %u\n",
2148 target_thread->reply_error.cmd);
2149 }
2150 binder_inner_proc_unlock(target_thread->proc);
2151 binder_thread_dec_tmpref(target_thread);
2152 binder_free_transaction(t);
2153 return;
2154 }
2155 next = t->from_parent;
2156
2157 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2158 "send failed reply for transaction %d, target dead\n",
2159 t->debug_id);
2160
2161 binder_free_transaction(t);
2162 if (next == NULL) {
2163 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2164 "reply failed, no target thread at root\n");
2165 return;
2166 }
2167 t = next;
2168 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2169 "reply failed, no target thread -- retry %d\n",
2170 t->debug_id);
2171 }
2172 }
2173
2174 /**
2175 * binder_cleanup_transaction() - cleans up undelivered transaction
2176 * @t: transaction that needs to be cleaned up
2177 * @reason: reason the transaction wasn't delivered
2178 * @error_code: error to return to caller (if synchronous call)
2179 */
2180 static void binder_cleanup_transaction(struct binder_transaction *t,
2181 const char *reason,
2182 uint32_t error_code)
2183 {
2184 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2185 binder_send_failed_reply(t, error_code);
2186 } else {
2187 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2188 "undelivered transaction %d, %s\n",
2189 t->debug_id, reason);
2190 binder_free_transaction(t);
2191 }
2192 }
2193
2194 /**
2195 * binder_validate_object() - checks for a valid metadata object in a buffer.
2196 * @buffer: binder_buffer that we're parsing.
2197 * @offset: offset in the buffer at which to validate an object.
2198 *
2199 * Return: If there's a valid metadata object at @offset in @buffer, the
2200 * size of that object. Otherwise, it returns zero.
2201 */
2202 static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
2203 {
2204 /* Check if we can read a header first */
2205 struct binder_object_header *hdr;
2206 size_t object_size = 0;
2207
2208 if (offset > buffer->data_size - sizeof(*hdr) ||
2209 buffer->data_size < sizeof(*hdr) ||
2210 !IS_ALIGNED(offset, sizeof(u32)))
2211 return 0;
2212
2213 /* Ok, now see if we can read a complete object. */
2214 hdr = (struct binder_object_header *)(buffer->data + offset);
2215 switch (hdr->type) {
2216 case BINDER_TYPE_BINDER:
2217 case BINDER_TYPE_WEAK_BINDER:
2218 case BINDER_TYPE_HANDLE:
2219 case BINDER_TYPE_WEAK_HANDLE:
2220 object_size = sizeof(struct flat_binder_object);
2221 break;
2222 case BINDER_TYPE_FD:
2223 object_size = sizeof(struct binder_fd_object);
2224 break;
2225 case BINDER_TYPE_PTR:
2226 object_size = sizeof(struct binder_buffer_object);
2227 break;
2228 case BINDER_TYPE_FDA:
2229 object_size = sizeof(struct binder_fd_array_object);
2230 break;
2231 default:
2232 return 0;
2233 }
2234 if (offset <= buffer->data_size - object_size &&
2235 buffer->data_size >= object_size)
2236 return object_size;
2237 else
2238 return 0;
2239 }
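
/*
 * For reference, binder_transaction() packs the buffer validated above
 * as: the flattened payload at buffer->data, the offsets array at
 * ALIGN(data_size, sizeof(void *)), and the extra (scatter-gather)
 * buffers after the offsets, pointer-aligned. Every offset must point
 * at a binder_object_header that is u32-aligned and fits entirely
 * within the data area, which is what the checks above enforce.
 */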
2240
2241 /**
2242 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2243 * @b: binder_buffer containing the object
2244 * @index: index in offset array at which the binder_buffer_object is
2245 * located
2246 * @start: points to the start of the offset array
2247 * @num_valid: the number of valid offsets in the offset array
2248 *
2249 * Return: If @index is within the valid range of the offset array
2250 * described by @start and @num_valid, and if there's a valid
2251 * binder_buffer_object at the offset found in index @index
2252 * of the offset array, that object is returned. Otherwise,
2253 * %NULL is returned.
2254 * Note that the offset found in index @index itself is not
2255 * verified; this function assumes that @num_valid elements
2256 * from @start were previously verified to have valid offsets.
2257 */
2258 static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
2259 binder_size_t index,
2260 binder_size_t *start,
2261 binder_size_t num_valid)
2262 {
2263 struct binder_buffer_object *buffer_obj;
2264 binder_size_t *offp;
2265
2266 if (index >= num_valid)
2267 return NULL;
2268
2269 offp = start + index;
2270 buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
2271 if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
2272 return NULL;
2273
2274 return buffer_obj;
2275 }
2276
2277 /**
2278 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2279 * @b: transaction buffer
2280 * @objects_start: start of objects buffer
2281 * @buffer: binder_buffer_object in which to fix up
2282 * @fixup_offset: start offset in @buffer to fix up
2283 * @last_obj: last binder_buffer_object that we fixed up in
2284 * @last_min_offset: minimum fixup offset in @last_obj
2285 *
2286 * Return: %true if a fixup in buffer @buffer at offset @offset is
2287 * allowed.
2288 *
2289 * For safety reasons, we only allow fixups inside a buffer to happen
2290 * at increasing offsets; additionally, we only allow fixup on the last
2291 * buffer object that was verified, or one of its parents.
2292 *
2293 * Example of what is allowed:
2294 *
2295 * A
2296 * B (parent = A, offset = 0)
2297 * C (parent = A, offset = 16)
2298 * D (parent = C, offset = 0)
2299 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2300 *
2301 * Examples of what is not allowed:
2302 *
2303 * Decreasing offsets within the same parent:
2304 * A
2305 * C (parent = A, offset = 16)
2306 * B (parent = A, offset = 0) // decreasing offset within A
2307 *
2308 * Referring to a parent that wasn't the last object or any of its parents:
2309 * A
2310 * B (parent = A, offset = 0)
2311 * C (parent = A, offset = 0)
2312 * C (parent = A, offset = 16)
2313 * D (parent = B, offset = 0) // B is not A or any of A's parents
2314 */
2315 static bool binder_validate_fixup(struct binder_buffer *b,
2316 binder_size_t *objects_start,
2317 struct binder_buffer_object *buffer,
2318 binder_size_t fixup_offset,
2319 struct binder_buffer_object *last_obj,
2320 binder_size_t last_min_offset)
2321 {
2322 if (!last_obj) {
2323 /* Nothing to fix up in */
2324 return false;
2325 }
2326
2327 while (last_obj != buffer) {
2328 /*
2329 * Safe to retrieve the parent of last_obj, since it
2330 * was already previously verified by the driver.
2331 */
2332 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2333 return false;
2334 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
2335 last_obj = (struct binder_buffer_object *)
2336 (b->data + *(objects_start + last_obj->parent));
2337 }
2338 return (fixup_offset >= last_min_offset);
2339 }
2340
2341 static void binder_transaction_buffer_release(struct binder_proc *proc,
2342 struct binder_buffer *buffer,
2343 binder_size_t *failed_at)
2344 {
2345 binder_size_t *offp, *off_start, *off_end;
2346 int debug_id = buffer->debug_id;
2347
2348 binder_debug(BINDER_DEBUG_TRANSACTION,
2349 "%d buffer release %d, size %zd-%zd, failed at %p\n",
2350 proc->pid, buffer->debug_id,
2351 buffer->data_size, buffer->offsets_size, failed_at);
2352
2353 if (buffer->target_node)
2354 binder_dec_node(buffer->target_node, 1, 0);
2355
2356 off_start = (binder_size_t *)(buffer->data +
2357 ALIGN(buffer->data_size, sizeof(void *)));
2358 if (failed_at)
2359 off_end = failed_at;
2360 else
2361 off_end = (void *)off_start + buffer->offsets_size;
2362 for (offp = off_start; offp < off_end; offp++) {
2363 struct binder_object_header *hdr;
2364 size_t object_size = binder_validate_object(buffer, *offp);
2365
2366 if (object_size == 0) {
2367 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2368 debug_id, (u64)*offp, buffer->data_size);
2369 continue;
2370 }
2371 hdr = (struct binder_object_header *)(buffer->data + *offp);
2372 switch (hdr->type) {
2373 case BINDER_TYPE_BINDER:
2374 case BINDER_TYPE_WEAK_BINDER: {
2375 struct flat_binder_object *fp;
2376 struct binder_node *node;
2377
2378 fp = to_flat_binder_object(hdr);
2379 node = binder_get_node(proc, fp->binder);
2380 if (node == NULL) {
2381 pr_err("transaction release %d bad node %016llx\n",
2382 debug_id, (u64)fp->binder);
2383 break;
2384 }
2385 binder_debug(BINDER_DEBUG_TRANSACTION,
2386 " node %d u%016llx\n",
2387 node->debug_id, (u64)node->ptr);
2388 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2389 0);
2390 binder_put_node(node);
2391 } break;
2392 case BINDER_TYPE_HANDLE:
2393 case BINDER_TYPE_WEAK_HANDLE: {
2394 struct flat_binder_object *fp;
2395 struct binder_ref_data rdata;
2396 int ret;
2397
2398 fp = to_flat_binder_object(hdr);
2399 ret = binder_dec_ref_for_handle(proc, fp->handle,
2400 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2401
2402 if (ret) {
2403 pr_err("transaction release %d bad handle %d, ret = %d\n",
2404 debug_id, fp->handle, ret);
2405 break;
2406 }
2407 binder_debug(BINDER_DEBUG_TRANSACTION,
2408 " ref %d desc %d\n",
2409 rdata.debug_id, rdata.desc);
2410 } break;
2411
2412 case BINDER_TYPE_FD: {
2413 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2414
2415 binder_debug(BINDER_DEBUG_TRANSACTION,
2416 " fd %d\n", fp->fd);
2417 if (failed_at)
2418 task_close_fd(proc, fp->fd);
2419 } break;
2420 case BINDER_TYPE_PTR:
2421 /*
2422 * Nothing to do here, this will get cleaned up when the
2423 * transaction buffer gets freed
2424 */
2425 break;
2426 case BINDER_TYPE_FDA: {
2427 struct binder_fd_array_object *fda;
2428 struct binder_buffer_object *parent;
2429 uintptr_t parent_buffer;
2430 u32 *fd_array;
2431 size_t fd_index;
2432 binder_size_t fd_buf_size;
2433
2434 fda = to_binder_fd_array_object(hdr);
2435 parent = binder_validate_ptr(buffer, fda->parent,
2436 off_start,
2437 offp - off_start);
2438 if (!parent) {
2439 pr_err("transaction release %d bad parent offset",
2440 debug_id);
2441 continue;
2442 }
2443 /*
2444 * Since the parent was already fixed up, convert it
2445 * back to kernel address space to access it
2446 */
2447 parent_buffer = parent->buffer -
2448 binder_alloc_get_user_buffer_offset(
2449 &proc->alloc);
2450
2451 fd_buf_size = sizeof(u32) * fda->num_fds;
2452 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2453 pr_err("transaction release %d invalid number of fds (%lld)\n",
2454 debug_id, (u64)fda->num_fds);
2455 continue;
2456 }
2457 if (fd_buf_size > parent->length ||
2458 fda->parent_offset > parent->length - fd_buf_size) {
2459 /* No space for all file descriptors here. */
2460 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2461 debug_id, (u64)fda->num_fds);
2462 continue;
2463 }
2464 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2465 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2466 task_close_fd(proc, fd_array[fd_index]);
2467 } break;
2468 default:
2469 pr_err("transaction release %d bad object type %x\n",
2470 debug_id, hdr->type);
2471 break;
2472 }
2473 }
2474 }
2475
2476 static int binder_translate_binder(struct flat_binder_object *fp,
2477 struct binder_transaction *t,
2478 struct binder_thread *thread)
2479 {
2480 struct binder_node *node;
2481 struct binder_proc *proc = thread->proc;
2482 struct binder_proc *target_proc = t->to_proc;
2483 struct binder_ref_data rdata;
2484 int ret = 0;
2485
2486 node = binder_get_node(proc, fp->binder);
2487 if (!node) {
2488 node = binder_new_node(proc, fp);
2489 if (!node)
2490 return -ENOMEM;
2491 }
2492 if (fp->cookie != node->cookie) {
2493 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2494 proc->pid, thread->pid, (u64)fp->binder,
2495 node->debug_id, (u64)fp->cookie,
2496 (u64)node->cookie);
2497 ret = -EINVAL;
2498 goto done;
2499 }
2500 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2501 ret = -EPERM;
2502 goto done;
2503 }
2504
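/*
 * Take (or create) a ref on the sender's node in the target proc and
 * rewrite the flat object below as a handle to that ref.
 */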
2505 ret = binder_inc_ref_for_node(target_proc, node,
2506 fp->hdr.type == BINDER_TYPE_BINDER,
2507 &thread->todo, &rdata);
2508 if (ret)
2509 goto done;
2510
2511 if (fp->hdr.type == BINDER_TYPE_BINDER)
2512 fp->hdr.type = BINDER_TYPE_HANDLE;
2513 else
2514 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2515 fp->binder = 0;
2516 fp->handle = rdata.desc;
2517 fp->cookie = 0;
2518
2519 trace_binder_transaction_node_to_ref(t, node, &rdata);
2520 binder_debug(BINDER_DEBUG_TRANSACTION,
2521 " node %d u%016llx -> ref %d desc %d\n",
2522 node->debug_id, (u64)node->ptr,
2523 rdata.debug_id, rdata.desc);
2524 done:
2525 binder_put_node(node);
2526 return ret;
2527 }
2528
2529 static int binder_translate_handle(struct flat_binder_object *fp,
2530 struct binder_transaction *t,
2531 struct binder_thread *thread)
2532 {
2533 struct binder_proc *proc = thread->proc;
2534 struct binder_proc *target_proc = t->to_proc;
2535 struct binder_node *node;
2536 struct binder_ref_data src_rdata;
2537 int ret = 0;
2538
2539 node = binder_get_node_from_ref(proc, fp->handle,
2540 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2541 if (!node) {
2542 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2543 proc->pid, thread->pid, fp->handle);
2544 return -EINVAL;
2545 }
2546 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2547 ret = -EPERM;
2548 goto done;
2549 }
2550
2551 binder_node_lock(node);
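/*
 * If the target process owns the node, convert the handle back into
 * a (weak) binder object; otherwise take a ref on the node in the
 * target proc and pass a handle to that ref instead.
 */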
2552 if (node->proc == target_proc) {
2553 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2554 fp->hdr.type = BINDER_TYPE_BINDER;
2555 else
2556 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2557 fp->binder = node->ptr;
2558 fp->cookie = node->cookie;
2559 if (node->proc)
2560 binder_inner_proc_lock(node->proc);
2561 binder_inc_node_nilocked(node,
2562 fp->hdr.type == BINDER_TYPE_BINDER,
2563 0, NULL);
2564 if (node->proc)
2565 binder_inner_proc_unlock(node->proc);
2566 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2567 binder_debug(BINDER_DEBUG_TRANSACTION,
2568 " ref %d desc %d -> node %d u%016llx\n",
2569 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2570 (u64)node->ptr);
2571 binder_node_unlock(node);
2572 } else {
2573 struct binder_ref_data dest_rdata;
2574
2575 binder_node_unlock(node);
2576 ret = binder_inc_ref_for_node(target_proc, node,
2577 fp->hdr.type == BINDER_TYPE_HANDLE,
2578 NULL, &dest_rdata);
2579 if (ret)
2580 goto done;
2581
2582 fp->binder = 0;
2583 fp->handle = dest_rdata.desc;
2584 fp->cookie = 0;
2585 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2586 &dest_rdata);
2587 binder_debug(BINDER_DEBUG_TRANSACTION,
2588 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2589 src_rdata.debug_id, src_rdata.desc,
2590 dest_rdata.debug_id, dest_rdata.desc,
2591 node->debug_id);
2592 }
2593 done:
2594 binder_put_node(node);
2595 return ret;
2596 }
2597
2598 static int binder_translate_fd(int fd,
2599 struct binder_transaction *t,
2600 struct binder_thread *thread,
2601 struct binder_transaction *in_reply_to)
2602 {
2603 struct binder_proc *proc = thread->proc;
2604 struct binder_proc *target_proc = t->to_proc;
2605 int target_fd;
2606 struct file *file;
2607 int ret;
2608 bool target_allows_fd;
2609
2610 if (in_reply_to)
2611 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2612 else
2613 target_allows_fd = t->buffer->target_node->accept_fds;
2614 if (!target_allows_fd) {
2615 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2616 proc->pid, thread->pid,
2617 in_reply_to ? "reply" : "transaction",
2618 fd);
2619 ret = -EPERM;
2620 goto err_fd_not_accepted;
2621 }
2622
2623 file = fget(fd);
2624 if (!file) {
2625 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2626 proc->pid, thread->pid, fd);
2627 ret = -EBADF;
2628 goto err_fget;
2629 }
2630 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2631 if (ret < 0) {
2632 ret = -EPERM;
2633 goto err_security;
2634 }
2635
2636 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2637 if (target_fd < 0) {
2638 ret = -ENOMEM;
2639 goto err_get_unused_fd;
2640 }
2641 task_fd_install(target_proc, target_fd, file);
2642 trace_binder_transaction_fd(t, fd, target_fd);
2643 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
2644 fd, target_fd);
2645
2646 return target_fd;
2647
2648 err_get_unused_fd:
2649 err_security:
2650 fput(file);
2651 err_fget:
2652 err_fd_not_accepted:
2653 return ret;
2654 }
2655
2656 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2657 struct binder_buffer_object *parent,
2658 struct binder_transaction *t,
2659 struct binder_thread *thread,
2660 struct binder_transaction *in_reply_to)
2661 {
2662 binder_size_t fdi, fd_buf_size, num_installed_fds;
2663 int target_fd;
2664 uintptr_t parent_buffer;
2665 u32 *fd_array;
2666 struct binder_proc *proc = thread->proc;
2667 struct binder_proc *target_proc = t->to_proc;
2668
2669 fd_buf_size = sizeof(u32) * fda->num_fds;
2670 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2671 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2672 proc->pid, thread->pid, (u64)fda->num_fds);
2673 return -EINVAL;
2674 }
2675 if (fd_buf_size > parent->length ||
2676 fda->parent_offset > parent->length - fd_buf_size) {
2677 /* No space for all file descriptors here. */
2678 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2679 proc->pid, thread->pid, (u64)fda->num_fds);
2680 return -EINVAL;
2681 }
2682 /*
2683 * Since the parent was already fixed up, convert it
2684 * back to the kernel address space to access it
2685 */
2686 parent_buffer = parent->buffer -
2687 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
2688 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2689 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2690 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2691 proc->pid, thread->pid);
2692 return -EINVAL;
2693 }
2694 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2695 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
2696 in_reply_to);
2697 if (target_fd < 0)
2698 goto err_translate_fd_failed;
2699 fd_array[fdi] = target_fd;
2700 }
2701 return 0;
2702
2703 err_translate_fd_failed:
2704 /*
2705 * Failed to allocate fd or security error, free fds
2706 * installed so far.
2707 */
2708 num_installed_fds = fdi;
2709 for (fdi = 0; fdi < num_installed_fds; fdi++)
2710 task_close_fd(target_proc, fd_array[fdi]);
2711 return target_fd;
2712 }
2713
2714 static int binder_fixup_parent(struct binder_transaction *t,
2715 struct binder_thread *thread,
2716 struct binder_buffer_object *bp,
2717 binder_size_t *off_start,
2718 binder_size_t num_valid,
2719 struct binder_buffer_object *last_fixup_obj,
2720 binder_size_t last_fixup_min_off)
2721 {
2722 struct binder_buffer_object *parent;
2723 u8 *parent_buffer;
2724 struct binder_buffer *b = t->buffer;
2725 struct binder_proc *proc = thread->proc;
2726 struct binder_proc *target_proc = t->to_proc;
2727
2728 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2729 return 0;
2730
2731 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2732 if (!parent) {
2733 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2734 proc->pid, thread->pid);
2735 return -EINVAL;
2736 }
2737
2738 if (!binder_validate_fixup(b, off_start,
2739 parent, bp->parent_offset,
2740 last_fixup_obj,
2741 last_fixup_min_off)) {
2742 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2743 proc->pid, thread->pid);
2744 return -EINVAL;
2745 }
2746
2747 if (parent->length < sizeof(binder_uintptr_t) ||
2748 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2749 /* No space for a pointer here! */
2750 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2751 proc->pid, thread->pid);
2752 return -EINVAL;
2753 }
2754 parent_buffer = (u8 *)((uintptr_t)parent->buffer -
2755 binder_alloc_get_user_buffer_offset(
2756 &target_proc->alloc));
2757 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2758
2759 return 0;
2760 }
2761
2762 /**
2763 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2764 * @t: transaction to send
2765 * @proc: process to send the transaction to
2766 * @thread: thread in @proc to send the transaction to (may be NULL)
2767 *
2768 * This function queues a transaction to the specified process. It will try
2769 * to find a thread in the target process to handle the transaction and
2770 * wake it up. If no thread is found, the work is queued to the proc
2771 * waitqueue.
2772 *
2773 * If the @thread parameter is not NULL, the transaction is always queued
2774 * to the waitlist of that specific thread.
2775 *
2776 * Return: true if the transaction was successfully queued
2777 * false if the target process or thread is dead
2778 */
2779 static bool binder_proc_transaction(struct binder_transaction *t,
2780 struct binder_proc *proc,
2781 struct binder_thread *thread)
2782 {
2783 struct binder_node *node = t->buffer->target_node;
2784 struct binder_priority node_prio;
2785 bool oneway = !!(t->flags & TF_ONE_WAY);
2786 bool pending_async = false;
2787
2788 BUG_ON(!node);
2789 binder_node_lock(node);
2790 node_prio.prio = node->min_priority;
2791 node_prio.sched_policy = node->sched_policy;
2792
2793 if (oneway) {
2794 BUG_ON(thread);
2795 if (node->has_async_transaction) {
2796 pending_async = true;
2797 } else {
2798 node->has_async_transaction = 1;
2799 }
2800 }
2801
2802 binder_inner_proc_lock(proc);
2803
2804 if (proc->is_dead || (thread && thread->is_dead)) {
2805 binder_inner_proc_unlock(proc);
2806 binder_node_unlock(node);
2807 return false;
2808 }
2809
2810 if (!thread && !pending_async)
2811 thread = binder_select_thread_ilocked(proc);
2812
2813 if (thread) {
2814 binder_transaction_priority(thread->task, t, node_prio,
2815 node->inherit_rt);
2816 binder_enqueue_thread_work_ilocked(thread, &t->work);
2817 } else if (!pending_async) {
2818 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2819 } else {
2820 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2821 }
2822
2823 if (!pending_async)
2824 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2825
2826 binder_inner_proc_unlock(proc);
2827 binder_node_unlock(node);
2828
2829 return true;
2830 }
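
/*
 * Dispatch summary for binder_proc_transaction(): synchronous work
 * (and async work with nothing pending on the node) goes to a target
 * thread if one is available, otherwise to proc->todo, and a thread
 * is woken. Async work with an async transaction already pending is
 * parked on node->async_todo and no wakeup is done.
 */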
2831
2832 /**
2833 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2834 * @node: struct binder_node for which to get refs
2835 * @procp: returns @node->proc if valid
2836 * @error: if no @procp then returns BR_DEAD_REPLY
2837 *
2838 * User-space normally keeps the node alive when creating a transaction
2839 * since it has a reference to the target. The local strong ref keeps it
2840 * alive if the sending process dies before the target process processes
2841 * the transaction. If the source process is malicious or has a reference
2842 * counting bug, relying on the local strong ref can fail.
2843 *
2844 * Since user-space can cause the local strong ref to go away, we also take
2845 * a tmpref on the node to ensure it survives while we are constructing
2846 * the transaction. We also need a tmpref on the proc while we are
2847 * constructing the transaction, so we take that here as well.
2848 *
2849 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2850 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2851 * target proc has died, @error is set to BR_DEAD_REPLY
2852 */
2853 static struct binder_node *binder_get_node_refs_for_txn(
2854 struct binder_node *node,
2855 struct binder_proc **procp,
2856 uint32_t *error)
2857 {
2858 struct binder_node *target_node = NULL;
2859
2860 binder_node_inner_lock(node);
2861 if (node->proc) {
2862 target_node = node;
2863 binder_inc_node_nilocked(node, 1, 0, NULL);
2864 binder_inc_node_tmpref_ilocked(node);
2865 node->proc->tmp_ref++;
2866 *procp = node->proc;
2867 } else
2868 *error = BR_DEAD_REPLY;
2869 binder_node_inner_unlock(node);
2870
2871 return target_node;
2872 }
2873
2874 static void binder_transaction(struct binder_proc *proc,
2875 struct binder_thread *thread,
2876 struct binder_transaction_data *tr, int reply,
2877 binder_size_t extra_buffers_size)
2878 {
2879 int ret;
2880 struct binder_transaction *t;
2881 struct binder_work *tcomplete;
2882 binder_size_t *offp, *off_end, *off_start;
2883 binder_size_t off_min;
2884 u8 *sg_bufp, *sg_buf_end;
2885 struct binder_proc *target_proc = NULL;
2886 struct binder_thread *target_thread = NULL;
2887 struct binder_node *target_node = NULL;
2888 struct binder_transaction *in_reply_to = NULL;
2889 struct binder_transaction_log_entry *e;
2890 uint32_t return_error = 0;
2891 uint32_t return_error_param = 0;
2892 uint32_t return_error_line = 0;
2893 struct binder_buffer_object *last_fixup_obj = NULL;
2894 binder_size_t last_fixup_min_off = 0;
2895 struct binder_context *context = proc->context;
2896 int t_debug_id = atomic_inc_return(&binder_last_id);
2897
2898 e = binder_transaction_log_add(&binder_transaction_log);
2899 e->debug_id = t_debug_id;
2900 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2901 e->from_proc = proc->pid;
2902 e->from_thread = thread->pid;
2903 e->target_handle = tr->target.handle;
2904 e->data_size = tr->data_size;
2905 e->offsets_size = tr->offsets_size;
2906 e->context_name = proc->context->name;
2907
2908 if (reply) {
2909 binder_inner_proc_lock(proc);
2910 in_reply_to = thread->transaction_stack;
2911 if (in_reply_to == NULL) {
2912 binder_inner_proc_unlock(proc);
2913 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2914 proc->pid, thread->pid);
2915 return_error = BR_FAILED_REPLY;
2916 return_error_param = -EPROTO;
2917 return_error_line = __LINE__;
2918 goto err_empty_call_stack;
2919 }
2920 if (in_reply_to->to_thread != thread) {
2921 spin_lock(&in_reply_to->lock);
2922 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2923 proc->pid, thread->pid, in_reply_to->debug_id,
2924 in_reply_to->to_proc ?
2925 in_reply_to->to_proc->pid : 0,
2926 in_reply_to->to_thread ?
2927 in_reply_to->to_thread->pid : 0);
2928 spin_unlock(&in_reply_to->lock);
2929 binder_inner_proc_unlock(proc);
2930 return_error = BR_FAILED_REPLY;
2931 return_error_param = -EPROTO;
2932 return_error_line = __LINE__;
2933 in_reply_to = NULL;
2934 goto err_bad_call_stack;
2935 }
2936 thread->transaction_stack = in_reply_to->to_parent;
2937 binder_inner_proc_unlock(proc);
2938 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2939 if (target_thread == NULL) {
2940 return_error = BR_DEAD_REPLY;
2941 return_error_line = __LINE__;
2942 goto err_dead_binder;
2943 }
2944 if (target_thread->transaction_stack != in_reply_to) {
2945 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2946 proc->pid, thread->pid,
2947 target_thread->transaction_stack ?
2948 target_thread->transaction_stack->debug_id : 0,
2949 in_reply_to->debug_id);
2950 binder_inner_proc_unlock(target_thread->proc);
2951 return_error = BR_FAILED_REPLY;
2952 return_error_param = -EPROTO;
2953 return_error_line = __LINE__;
2954 in_reply_to = NULL;
2955 target_thread = NULL;
2956 goto err_dead_binder;
2957 }
2958 target_proc = target_thread->proc;
2959 target_proc->tmp_ref++;
2960 binder_inner_proc_unlock(target_thread->proc);
2961 } else {
2962 if (tr->target.handle) {
2963 struct binder_ref *ref;
2964
2965 /*
2966 * There must already be a strong ref
2967 * on this node. If so, do a strong
2968 * increment on the node to ensure it
2969 * stays alive until the transaction is
2970 * done.
2971 */
2972 binder_proc_lock(proc);
2973 ref = binder_get_ref_olocked(proc, tr->target.handle,
2974 true);
2975 if (ref) {
2976 target_node = binder_get_node_refs_for_txn(
2977 ref->node, &target_proc,
2978 &return_error);
2979 } else {
2980 binder_user_error("%d:%d got transaction to invalid handle\n",
2981 proc->pid, thread->pid);
2982 return_error = BR_FAILED_REPLY;
2983 }
2984 binder_proc_unlock(proc);
2985 } else {
2986 mutex_lock(&context->context_mgr_node_lock);
2987 target_node = context->binder_context_mgr_node;
2988 if (target_node)
2989 target_node = binder_get_node_refs_for_txn(
2990 target_node, &target_proc,
2991 &return_error);
2992 else
2993 return_error = BR_DEAD_REPLY;
2994 mutex_unlock(&context->context_mgr_node_lock);
2995 }
2996 if (!target_node) {
2997 /*
2998 * return_error is set above
2999 */
3000 return_error_param = -EINVAL;
3001 return_error_line = __LINE__;
3002 goto err_dead_binder;
3003 }
3004 e->to_node = target_node->debug_id;
3005 if (security_binder_transaction(proc->tsk,
3006 target_proc->tsk) < 0) {
3007 return_error = BR_FAILED_REPLY;
3008 return_error_param = -EPERM;
3009 return_error_line = __LINE__;
3010 goto err_invalid_target_handle;
3011 }
3012 binder_inner_proc_lock(proc);
3013 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3014 struct binder_transaction *tmp;
3015
3016 tmp = thread->transaction_stack;
3017 if (tmp->to_thread != thread) {
3018 spin_lock(&tmp->lock);
3019 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3020 proc->pid, thread->pid, tmp->debug_id,
3021 tmp->to_proc ? tmp->to_proc->pid : 0,
3022 tmp->to_thread ?
3023 tmp->to_thread->pid : 0);
3024 spin_unlock(&tmp->lock);
3025 binder_inner_proc_unlock(proc);
3026 return_error = BR_FAILED_REPLY;
3027 return_error_param = -EPROTO;
3028 return_error_line = __LINE__;
3029 goto err_bad_call_stack;
3030 }
3031 while (tmp) {
3032 struct binder_thread *from;
3033
3034 spin_lock(&tmp->lock);
3035 from = tmp->from;
3036 if (from && from->proc == target_proc) {
3037 atomic_inc(&from->tmp_ref);
3038 target_thread = from;
3039 spin_unlock(&tmp->lock);
3040 break;
3041 }
3042 spin_unlock(&tmp->lock);
3043 tmp = tmp->from_parent;
3044 }
3045 }
3046 binder_inner_proc_unlock(proc);
3047 }
3048 if (target_thread)
3049 e->to_thread = target_thread->pid;
3050 e->to_proc = target_proc->pid;
3051
3052 /* TODO: reuse incoming transaction for reply */
3053 t = kzalloc(sizeof(*t), GFP_KERNEL);
3054 if (t == NULL) {
3055 return_error = BR_FAILED_REPLY;
3056 return_error_param = -ENOMEM;
3057 return_error_line = __LINE__;
3058 goto err_alloc_t_failed;
3059 }
3060 binder_stats_created(BINDER_STAT_TRANSACTION);
3061 spin_lock_init(&t->lock);
3062
3063 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3064 if (tcomplete == NULL) {
3065 return_error = BR_FAILED_REPLY;
3066 return_error_param = -ENOMEM;
3067 return_error_line = __LINE__;
3068 goto err_alloc_tcomplete_failed;
3069 }
3070 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3071
3072 t->debug_id = t_debug_id;
3073
3074 if (reply)
3075 binder_debug(BINDER_DEBUG_TRANSACTION,
3076 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3077 proc->pid, thread->pid, t->debug_id,
3078 target_proc->pid, target_thread->pid,
3079 (u64)tr->data.ptr.buffer,
3080 (u64)tr->data.ptr.offsets,
3081 (u64)tr->data_size, (u64)tr->offsets_size,
3082 (u64)extra_buffers_size);
3083 else
3084 binder_debug(BINDER_DEBUG_TRANSACTION,
3085 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3086 proc->pid, thread->pid, t->debug_id,
3087 target_proc->pid, target_node->debug_id,
3088 (u64)tr->data.ptr.buffer,
3089 (u64)tr->data.ptr.offsets,
3090 (u64)tr->data_size, (u64)tr->offsets_size,
3091 (u64)extra_buffers_size);
3092
3093 if (!reply && !(tr->flags & TF_ONE_WAY))
3094 t->from = thread;
3095 else
3096 t->from = NULL;
3097 t->sender_euid = task_euid(proc->tsk);
3098 t->to_proc = target_proc;
3099 t->to_thread = target_thread;
3100 t->code = tr->code;
3101 t->flags = tr->flags;
3102 if (!(t->flags & TF_ONE_WAY) &&
3103 binder_supported_policy(current->policy)) {
3104 /* Inherit supported policies for synchronous transactions */
3105 t->priority.sched_policy = current->policy;
3106 t->priority.prio = current->normal_prio;
3107 } else {
3108 /* Otherwise, fall back to the default priority */
3109 t->priority = target_proc->default_priority;
3110 }
3111
3112 trace_binder_transaction(reply, t, target_node);
3113
3114 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3115 tr->offsets_size, extra_buffers_size,
3116 !reply && (t->flags & TF_ONE_WAY));
3117 if (IS_ERR(t->buffer)) {
3118 /*
3119 * -ESRCH indicates VMA cleared. The target is dying.
3120 */
3121 return_error_param = PTR_ERR(t->buffer);
3122 return_error = return_error_param == -ESRCH ?
3123 BR_DEAD_REPLY : BR_FAILED_REPLY;
3124 return_error_line = __LINE__;
3125 t->buffer = NULL;
3126 goto err_binder_alloc_buf_failed;
3127 }
3128 t->buffer->allow_user_free = 0;
3129 t->buffer->debug_id = t->debug_id;
3130 t->buffer->transaction = t;
3131 t->buffer->target_node = target_node;
3132 trace_binder_transaction_alloc_buf(t->buffer);
3133 off_start = (binder_size_t *)(t->buffer->data +
3134 ALIGN(tr->data_size, sizeof(void *)));
3135 offp = off_start;
3136
3137 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
3138 tr->data.ptr.buffer, tr->data_size)) {
3139 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3140 proc->pid, thread->pid);
3141 return_error = BR_FAILED_REPLY;
3142 return_error_param = -EFAULT;
3143 return_error_line = __LINE__;
3144 goto err_copy_data_failed;
3145 }
3146 if (copy_from_user(offp, (const void __user *)(uintptr_t)
3147 tr->data.ptr.offsets, tr->offsets_size)) {
3148 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3149 proc->pid, thread->pid);
3150 return_error = BR_FAILED_REPLY;
3151 return_error_param = -EFAULT;
3152 return_error_line = __LINE__;
3153 goto err_copy_data_failed;
3154 }
3155 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3156 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3157 proc->pid, thread->pid, (u64)tr->offsets_size);
3158 return_error = BR_FAILED_REPLY;
3159 return_error_param = -EINVAL;
3160 return_error_line = __LINE__;
3161 goto err_bad_offset;
3162 }
3163 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3164 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3165 proc->pid, thread->pid,
3166 extra_buffers_size);
3167 return_error = BR_FAILED_REPLY;
3168 return_error_param = -EINVAL;
3169 return_error_line = __LINE__;
3170 goto err_bad_offset;
3171 }
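/* The scatter-gather buffers follow the offsets array, pointer-aligned. */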
3172 off_end = (void *)off_start + tr->offsets_size;
3173 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
3174 sg_buf_end = sg_bufp + extra_buffers_size;
3175 off_min = 0;
3176 for (; offp < off_end; offp++) {
3177 struct binder_object_header *hdr;
3178 size_t object_size = binder_validate_object(t->buffer, *offp);
3179
3180 if (object_size == 0 || *offp < off_min) {
3181 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3182 proc->pid, thread->pid, (u64)*offp,
3183 (u64)off_min,
3184 (u64)t->buffer->data_size);
3185 return_error = BR_FAILED_REPLY;
3186 return_error_param = -EINVAL;
3187 return_error_line = __LINE__;
3188 goto err_bad_offset;
3189 }
3190
3191 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
3192 off_min = *offp + object_size;
3193 switch (hdr->type) {
3194 case BINDER_TYPE_BINDER:
3195 case BINDER_TYPE_WEAK_BINDER: {
3196 struct flat_binder_object *fp;
3197
3198 fp = to_flat_binder_object(hdr);
3199 ret = binder_translate_binder(fp, t, thread);
3200 if (ret < 0) {
3201 return_error = BR_FAILED_REPLY;
3202 return_error_param = ret;
3203 return_error_line = __LINE__;
3204 goto err_translate_failed;
3205 }
3206 } break;
3207 case BINDER_TYPE_HANDLE:
3208 case BINDER_TYPE_WEAK_HANDLE: {
3209 struct flat_binder_object *fp;
3210
3211 fp = to_flat_binder_object(hdr);
3212 ret = binder_translate_handle(fp, t, thread);
3213 if (ret < 0) {
3214 return_error = BR_FAILED_REPLY;
3215 return_error_param = ret;
3216 return_error_line = __LINE__;
3217 goto err_translate_failed;
3218 }
3219 } break;
3220
3221 case BINDER_TYPE_FD: {
3222 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3223 int target_fd = binder_translate_fd(fp->fd, t, thread,
3224 in_reply_to);
3225
3226 if (target_fd < 0) {
3227 return_error = BR_FAILED_REPLY;
3228 return_error_param = target_fd;
3229 return_error_line = __LINE__;
3230 goto err_translate_failed;
3231 }
3232 fp->pad_binder = 0;
3233 fp->fd = target_fd;
3234 } break;
3235 case BINDER_TYPE_FDA: {
3236 struct binder_fd_array_object *fda =
3237 to_binder_fd_array_object(hdr);
3238 struct binder_buffer_object *parent =
3239 binder_validate_ptr(t->buffer, fda->parent,
3240 off_start,
3241 offp - off_start);
3242 if (!parent) {
3243 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3244 proc->pid, thread->pid);
3245 return_error = BR_FAILED_REPLY;
3246 return_error_param = -EINVAL;
3247 return_error_line = __LINE__;
3248 goto err_bad_parent;
3249 }
3250 if (!binder_validate_fixup(t->buffer, off_start,
3251 parent, fda->parent_offset,
3252 last_fixup_obj,
3253 last_fixup_min_off)) {
3254 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3255 proc->pid, thread->pid);
3256 return_error = BR_FAILED_REPLY;
3257 return_error_param = -EINVAL;
3258 return_error_line = __LINE__;
3259 goto err_bad_parent;
3260 }
3261 ret = binder_translate_fd_array(fda, parent, t, thread,
3262 in_reply_to);
3263 if (ret < 0) {
3264 return_error = BR_FAILED_REPLY;
3265 return_error_param = ret;
3266 return_error_line = __LINE__;
3267 goto err_translate_failed;
3268 }
3269 last_fixup_obj = parent;
3270 last_fixup_min_off =
3271 fda->parent_offset + sizeof(u32) * fda->num_fds;
3272 } break;
3273 case BINDER_TYPE_PTR: {
3274 struct binder_buffer_object *bp =
3275 to_binder_buffer_object(hdr);
3276 size_t buf_left = sg_buf_end - sg_bufp;
3277
3278 if (bp->length > buf_left) {
3279 binder_user_error("%d:%d got transaction with too large buffer\n",
3280 proc->pid, thread->pid);
3281 return_error = BR_FAILED_REPLY;
3282 return_error_param = -EINVAL;
3283 return_error_line = __LINE__;
3284 goto err_bad_offset;
3285 }
3286 if (copy_from_user(sg_bufp,
3287 (const void __user *)(uintptr_t)
3288 bp->buffer, bp->length)) {
3289 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3290 proc->pid, thread->pid);
3291 return_error_param = -EFAULT;
3292 return_error = BR_FAILED_REPLY;
3293 return_error_line = __LINE__;
3294 goto err_copy_data_failed;
3295 }
3296 /* Fixup buffer pointer to target proc address space */
3297 bp->buffer = (uintptr_t)sg_bufp +
3298 binder_alloc_get_user_buffer_offset(
3299 &target_proc->alloc);
3300 sg_bufp += ALIGN(bp->length, sizeof(u64));
3301
3302 ret = binder_fixup_parent(t, thread, bp, off_start,
3303 offp - off_start,
3304 last_fixup_obj,
3305 last_fixup_min_off);
3306 if (ret < 0) {
3307 return_error = BR_FAILED_REPLY;
3308 return_error_param = ret;
3309 return_error_line = __LINE__;
3310 goto err_translate_failed;
3311 }
3312 last_fixup_obj = bp;
3313 last_fixup_min_off = 0;
3314 } break;
3315 default:
3316 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3317 proc->pid, thread->pid, hdr->type);
3318 return_error = BR_FAILED_REPLY;
3319 return_error_param = -EINVAL;
3320 return_error_line = __LINE__;
3321 goto err_bad_object_type;
3322 }
3323 }
3324 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3325 t->work.type = BINDER_WORK_TRANSACTION;
3326
3327 if (reply) {
3328 binder_enqueue_thread_work(thread, tcomplete);
3329 binder_inner_proc_lock(target_proc);
3330 if (target_thread->is_dead) {
3331 binder_inner_proc_unlock(target_proc);
3332 goto err_dead_proc_or_thread;
3333 }
3334 BUG_ON(t->buffer->async_transaction != 0);
3335 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3336 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3337 binder_inner_proc_unlock(target_proc);
3338 wake_up_interruptible_sync(&target_thread->wait);
3339 binder_restore_priority(current, in_reply_to->saved_priority);
3340 binder_free_transaction(in_reply_to);
3341 } else if (!(t->flags & TF_ONE_WAY)) {
3342 BUG_ON(t->buffer->async_transaction != 0);
3343 binder_inner_proc_lock(proc);
3344 /*
3345 * Defer the TRANSACTION_COMPLETE, so we don't return to
3346 * userspace immediately; this allows the target process to
3347 * immediately start processing this transaction, reducing
3348 * latency. We will then return the TRANSACTION_COMPLETE when
3349 * the target replies (or there is an error).
3350 */
3351 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3352 t->need_reply = 1;
3353 t->from_parent = thread->transaction_stack;
3354 thread->transaction_stack = t;
3355 binder_inner_proc_unlock(proc);
3356 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3357 binder_inner_proc_lock(proc);
3358 binder_pop_transaction_ilocked(thread, t);
3359 binder_inner_proc_unlock(proc);
3360 goto err_dead_proc_or_thread;
3361 }
3362 } else {
3363 BUG_ON(target_node == NULL);
3364 BUG_ON(t->buffer->async_transaction != 1);
3365 binder_enqueue_thread_work(thread, tcomplete);
3366 if (!binder_proc_transaction(t, target_proc, NULL))
3367 goto err_dead_proc_or_thread;
3368 }
3369 if (target_thread)
3370 binder_thread_dec_tmpref(target_thread);
3371 binder_proc_dec_tmpref(target_proc);
3372 if (target_node)
3373 binder_dec_node_tmpref(target_node);
3374 /*
3375 * write barrier to synchronize with initialization
3376 * of log entry
3377 */
3378 smp_wmb();
3379 WRITE_ONCE(e->debug_id_done, t_debug_id);
3380 return;
3381
3382 err_dead_proc_or_thread:
3383 return_error = BR_DEAD_REPLY;
3384 return_error_line = __LINE__;
3385 binder_dequeue_work(proc, tcomplete);
3386 err_translate_failed:
3387 err_bad_object_type:
3388 err_bad_offset:
3389 err_bad_parent:
3390 err_copy_data_failed:
3391 trace_binder_transaction_failed_buffer_release(t->buffer);
3392 binder_transaction_buffer_release(target_proc, t->buffer, offp);
3393 if (target_node)
3394 binder_dec_node_tmpref(target_node);
3395 target_node = NULL;
3396 t->buffer->transaction = NULL;
3397 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3398 err_binder_alloc_buf_failed:
3399 kfree(tcomplete);
3400 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3401 err_alloc_tcomplete_failed:
3402 kfree(t);
3403 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3404 err_alloc_t_failed:
3405 err_bad_call_stack:
3406 err_empty_call_stack:
3407 err_dead_binder:
3408 err_invalid_target_handle:
3409 if (target_thread)
3410 binder_thread_dec_tmpref(target_thread);
3411 if (target_proc)
3412 binder_proc_dec_tmpref(target_proc);
3413 if (target_node) {
3414 binder_dec_node(target_node, 1, 0);
3415 binder_dec_node_tmpref(target_node);
3416 }
3417
3418 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3419 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3420 proc->pid, thread->pid, return_error, return_error_param,
3421 (u64)tr->data_size, (u64)tr->offsets_size,
3422 return_error_line);
3423
3424 {
3425 struct binder_transaction_log_entry *fe;
3426
3427 e->return_error = return_error;
3428 e->return_error_param = return_error_param;
3429 e->return_error_line = return_error_line;
3430 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3431 *fe = *e;
3432 /*
3433 * write barrier to synchronize with initialization
3434 * of log entry
3435 */
3436 smp_wmb();
3437 WRITE_ONCE(e->debug_id_done, t_debug_id);
3438 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3439 }
3440
3441 BUG_ON(thread->return_error.cmd != BR_OK);
3442 if (in_reply_to) {
3443 binder_restore_priority(current, in_reply_to->saved_priority);
3444 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3445 binder_enqueue_thread_work(thread, &thread->return_error.work);
3446 binder_send_failed_reply(in_reply_to, return_error);
3447 } else {
3448 thread->return_error.cmd = return_error;
3449 binder_enqueue_thread_work(thread, &thread->return_error.work);
3450 }
3451 }
3452
3453 int binder_thread_write(struct binder_proc *proc,
3454 struct binder_thread *thread,
3455 binder_uintptr_t binder_buffer, size_t size,
3456 binder_size_t *consumed)
3457 {
3458 uint32_t cmd;
3459 struct binder_context *context = proc->context;
3460 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3461 void __user *ptr = buffer + *consumed;
3462 void __user *end = buffer + size;
3463
3464 while (ptr < end && thread->return_error.cmd == BR_OK) {
3465 int ret;
3466
3467 if (get_user(cmd, (uint32_t __user *)ptr))
3468 return -EFAULT;
3469 ptr += sizeof(uint32_t);
3470 trace_binder_command(cmd);
3471 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3472 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3473 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3474 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3475 }
3476 switch (cmd) {
3477 case BC_INCREFS:
3478 case BC_ACQUIRE:
3479 case BC_RELEASE:
3480 case BC_DECREFS: {
3481 uint32_t target;
3482 const char *debug_string;
3483 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3484 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3485 struct binder_ref_data rdata;
3486
3487 if (get_user(target, (uint32_t __user *)ptr))
3488 return -EFAULT;
3489
3490 ptr += sizeof(uint32_t);
3491 ret = -1;
3492 if (increment && !target) {
3493 struct binder_node *ctx_mgr_node;
3494 mutex_lock(&context->context_mgr_node_lock);
3495 ctx_mgr_node = context->binder_context_mgr_node;
3496 if (ctx_mgr_node)
3497 ret = binder_inc_ref_for_node(
3498 proc, ctx_mgr_node,
3499 strong, NULL, &rdata);
3500 mutex_unlock(&context->context_mgr_node_lock);
3501 }
3502 if (ret)
3503 ret = binder_update_ref_for_handle(
3504 proc, target, increment, strong,
3505 &rdata);
3506 if (!ret && rdata.desc != target) {
3507 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3508 proc->pid, thread->pid,
3509 target, rdata.desc);
3510 }
3511 switch (cmd) {
3512 case BC_INCREFS:
3513 debug_string = "IncRefs";
3514 break;
3515 case BC_ACQUIRE:
3516 debug_string = "Acquire";
3517 break;
3518 case BC_RELEASE:
3519 debug_string = "Release";
3520 break;
3521 case BC_DECREFS:
3522 default:
3523 debug_string = "DecRefs";
3524 break;
3525 }
3526 if (ret) {
3527 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3528 proc->pid, thread->pid, debug_string,
3529 strong, target, ret);
3530 break;
3531 }
3532 binder_debug(BINDER_DEBUG_USER_REFS,
3533 "%d:%d %s ref %d desc %d s %d w %d\n",
3534 proc->pid, thread->pid, debug_string,
3535 rdata.debug_id, rdata.desc, rdata.strong,
3536 rdata.weak);
3537 break;
3538 }
3539 case BC_INCREFS_DONE:
3540 case BC_ACQUIRE_DONE: {
3541 binder_uintptr_t node_ptr;
3542 binder_uintptr_t cookie;
3543 struct binder_node *node;
3544 bool free_node;
3545
3546 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3547 return -EFAULT;
3548 ptr += sizeof(binder_uintptr_t);
3549 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3550 return -EFAULT;
3551 ptr += sizeof(binder_uintptr_t);
3552 node = binder_get_node(proc, node_ptr);
3553 if (node == NULL) {
3554 binder_user_error("%d:%d %s u%016llx no match\n",
3555 proc->pid, thread->pid,
3556 cmd == BC_INCREFS_DONE ?
3557 "BC_INCREFS_DONE" :
3558 "BC_ACQUIRE_DONE",
3559 (u64)node_ptr);
3560 break;
3561 }
3562 if (cookie != node->cookie) {
3563 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3564 proc->pid, thread->pid,
3565 cmd == BC_INCREFS_DONE ?
3566 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3567 (u64)node_ptr, node->debug_id,
3568 (u64)cookie, (u64)node->cookie);
3569 binder_put_node(node);
3570 break;
3571 }
3572 binder_node_inner_lock(node);
3573 if (cmd == BC_ACQUIRE_DONE) {
3574 if (node->pending_strong_ref == 0) {
3575 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3576 proc->pid, thread->pid,
3577 node->debug_id);
3578 binder_node_inner_unlock(node);
3579 binder_put_node(node);
3580 break;
3581 }
3582 node->pending_strong_ref = 0;
3583 } else {
3584 if (node->pending_weak_ref == 0) {
3585 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3586 proc->pid, thread->pid,
3587 node->debug_id);
3588 binder_node_inner_unlock(node);
3589 binder_put_node(node);
3590 break;
3591 }
3592 node->pending_weak_ref = 0;
3593 }
3594 free_node = binder_dec_node_nilocked(node,
3595 cmd == BC_ACQUIRE_DONE, 0);
3596 WARN_ON(free_node);
3597 binder_debug(BINDER_DEBUG_USER_REFS,
3598 "%d:%d %s node %d ls %d lw %d tr %d\n",
3599 proc->pid, thread->pid,
3600 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3601 node->debug_id, node->local_strong_refs,
3602 node->local_weak_refs, node->tmp_refs);
3603 binder_node_inner_unlock(node);
3604 binder_put_node(node);
3605 break;
3606 }
3607 case BC_ATTEMPT_ACQUIRE:
3608 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3609 return -EINVAL;
3610 case BC_ACQUIRE_RESULT:
3611 pr_err("BC_ACQUIRE_RESULT not supported\n");
3612 return -EINVAL;
3613
3614 case BC_FREE_BUFFER: {
3615 binder_uintptr_t data_ptr;
3616 struct binder_buffer *buffer;
3617
3618 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3619 return -EFAULT;
3620 ptr += sizeof(binder_uintptr_t);
3621
3622 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3623 data_ptr);
3624 if (buffer == NULL) {
3625 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
3626 proc->pid, thread->pid, (u64)data_ptr);
3627 break;
3628 }
3629 if (!buffer->allow_user_free) {
3630 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
3631 proc->pid, thread->pid, (u64)data_ptr);
3632 break;
3633 }
3634 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3635 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3636 proc->pid, thread->pid, (u64)data_ptr,
3637 buffer->debug_id,
3638 buffer->transaction ? "active" : "finished");
3639
3640 if (buffer->transaction) {
3641 buffer->transaction->buffer = NULL;
3642 buffer->transaction = NULL;
3643 }
3644 if (buffer->async_transaction && buffer->target_node) {
3645 struct binder_node *buf_node;
3646 struct binder_work *w;
3647
3648 buf_node = buffer->target_node;
3649 binder_node_inner_lock(buf_node);
3650 BUG_ON(!buf_node->has_async_transaction);
3651 BUG_ON(buf_node->proc != proc);
3652 w = binder_dequeue_work_head_ilocked(
3653 &buf_node->async_todo);
3654 if (!w) {
3655 buf_node->has_async_transaction = 0;
3656 } else {
3657 binder_enqueue_work_ilocked(
3658 w, &proc->todo);
3659 binder_wakeup_proc_ilocked(proc);
3660 }
3661 binder_node_inner_unlock(buf_node);
3662 }
3663 trace_binder_transaction_buffer_release(buffer);
3664 binder_transaction_buffer_release(proc, buffer, NULL);
3665 binder_alloc_free_buf(&proc->alloc, buffer);
3666 break;
3667 }
3668
3669 case BC_TRANSACTION_SG:
3670 case BC_REPLY_SG: {
3671 struct binder_transaction_data_sg tr;
3672
3673 if (copy_from_user(&tr, ptr, sizeof(tr)))
3674 return -EFAULT;
3675 ptr += sizeof(tr);
3676 binder_transaction(proc, thread, &tr.transaction_data,
3677 cmd == BC_REPLY_SG, tr.buffers_size);
3678 break;
3679 }
3680 case BC_TRANSACTION:
3681 case BC_REPLY: {
3682 struct binder_transaction_data tr;
3683
3684 if (copy_from_user(&tr, ptr, sizeof(tr)))
3685 return -EFAULT;
3686 ptr += sizeof(tr);
3687 binder_transaction(proc, thread, &tr,
3688 cmd == BC_REPLY, 0);
3689 break;
3690 }
3691
3692 case BC_REGISTER_LOOPER:
3693 binder_debug(BINDER_DEBUG_THREADS,
3694 "%d:%d BC_REGISTER_LOOPER\n",
3695 proc->pid, thread->pid);
3696 binder_inner_proc_lock(proc);
3697 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3698 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3699 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3700 proc->pid, thread->pid);
3701 } else if (proc->requested_threads == 0) {
3702 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3703 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3704 proc->pid, thread->pid);
3705 } else {
3706 proc->requested_threads--;
3707 proc->requested_threads_started++;
3708 }
3709 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3710 binder_inner_proc_unlock(proc);
3711 break;
3712 case BC_ENTER_LOOPER:
3713 binder_debug(BINDER_DEBUG_THREADS,
3714 "%d:%d BC_ENTER_LOOPER\n",
3715 proc->pid, thread->pid);
3716 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3717 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3718 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3719 proc->pid, thread->pid);
3720 }
3721 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3722 break;
3723 case BC_EXIT_LOOPER:
3724 binder_debug(BINDER_DEBUG_THREADS,
3725 "%d:%d BC_EXIT_LOOPER\n",
3726 proc->pid, thread->pid);
3727 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3728 break;
3729
3730 case BC_REQUEST_DEATH_NOTIFICATION:
3731 case BC_CLEAR_DEATH_NOTIFICATION: {
3732 uint32_t target;
3733 binder_uintptr_t cookie;
3734 struct binder_ref *ref;
3735 struct binder_ref_death *death = NULL;
3736
3737 if (get_user(target, (uint32_t __user *)ptr))
3738 return -EFAULT;
3739 ptr += sizeof(uint32_t);
3740 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3741 return -EFAULT;
3742 ptr += sizeof(binder_uintptr_t);
3743 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3744 /*
3745 * Allocate memory for death notification
3746 * before taking lock
3747 */
3748 death = kzalloc(sizeof(*death), GFP_KERNEL);
3749 if (death == NULL) {
3750 WARN_ON(thread->return_error.cmd !=
3751 BR_OK);
3752 thread->return_error.cmd = BR_ERROR;
3753 binder_enqueue_thread_work(
3754 thread,
3755 &thread->return_error.work);
3756 binder_debug(
3757 BINDER_DEBUG_FAILED_TRANSACTION,
3758 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3759 proc->pid, thread->pid);
3760 break;
3761 }
3762 }
3763 binder_proc_lock(proc);
3764 ref = binder_get_ref_olocked(proc, target, false);
3765 if (ref == NULL) {
3766 binder_user_error("%d:%d %s invalid ref %d\n",
3767 proc->pid, thread->pid,
3768 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3769 "BC_REQUEST_DEATH_NOTIFICATION" :
3770 "BC_CLEAR_DEATH_NOTIFICATION",
3771 target);
3772 binder_proc_unlock(proc);
3773 kfree(death);
3774 break;
3775 }
3776
3777 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3778 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3779 proc->pid, thread->pid,
3780 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3781 "BC_REQUEST_DEATH_NOTIFICATION" :
3782 "BC_CLEAR_DEATH_NOTIFICATION",
3783 (u64)cookie, ref->data.debug_id,
3784 ref->data.desc, ref->data.strong,
3785 ref->data.weak, ref->node->debug_id);
3786
3787 binder_node_lock(ref->node);
3788 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3789 if (ref->death) {
3790 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3791 proc->pid, thread->pid);
3792 binder_node_unlock(ref->node);
3793 binder_proc_unlock(proc);
3794 kfree(death);
3795 break;
3796 }
3797 binder_stats_created(BINDER_STAT_DEATH);
3798 INIT_LIST_HEAD(&death->work.entry);
3799 death->cookie = cookie;
3800 ref->death = death;
3801 if (ref->node->proc == NULL) {
3802 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3803
3804 binder_inner_proc_lock(proc);
3805 binder_enqueue_work_ilocked(
3806 &ref->death->work, &proc->todo);
3807 binder_wakeup_proc_ilocked(proc);
3808 binder_inner_proc_unlock(proc);
3809 }
3810 } else {
3811 if (ref->death == NULL) {
3812 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3813 proc->pid, thread->pid);
3814 binder_node_unlock(ref->node);
3815 binder_proc_unlock(proc);
3816 break;
3817 }
3818 death = ref->death;
3819 if (death->cookie != cookie) {
3820 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3821 proc->pid, thread->pid,
3822 (u64)death->cookie,
3823 (u64)cookie);
3824 binder_node_unlock(ref->node);
3825 binder_proc_unlock(proc);
3826 break;
3827 }
3828 ref->death = NULL;
3829 binder_inner_proc_lock(proc);
3830 if (list_empty(&death->work.entry)) {
3831 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3832 if (thread->looper &
3833 (BINDER_LOOPER_STATE_REGISTERED |
3834 BINDER_LOOPER_STATE_ENTERED))
3835 binder_enqueue_thread_work_ilocked(
3836 thread,
3837 &death->work);
3838 else {
3839 binder_enqueue_work_ilocked(
3840 &death->work,
3841 &proc->todo);
3842 binder_wakeup_proc_ilocked(
3843 proc);
3844 }
3845 } else {
3846 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3847 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3848 }
3849 binder_inner_proc_unlock(proc);
3850 }
3851 binder_node_unlock(ref->node);
3852 binder_proc_unlock(proc);
3853 } break;
3854 case BC_DEAD_BINDER_DONE: {
3855 struct binder_work *w;
3856 binder_uintptr_t cookie;
3857 struct binder_ref_death *death = NULL;
3858
3859 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3860 return -EFAULT;
3861
3862 ptr += sizeof(cookie);
3863 binder_inner_proc_lock(proc);
3864 list_for_each_entry(w, &proc->delivered_death,
3865 entry) {
3866 struct binder_ref_death *tmp_death =
3867 container_of(w,
3868 struct binder_ref_death,
3869 work);
3870
3871 if (tmp_death->cookie == cookie) {
3872 death = tmp_death;
3873 break;
3874 }
3875 }
3876 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3877 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
3878 proc->pid, thread->pid, (u64)cookie,
3879 death);
3880 if (death == NULL) {
3881 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3882 proc->pid, thread->pid, (u64)cookie);
3883 binder_inner_proc_unlock(proc);
3884 break;
3885 }
3886 binder_dequeue_work_ilocked(&death->work);
3887 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3888 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3889 if (thread->looper &
3890 (BINDER_LOOPER_STATE_REGISTERED |
3891 BINDER_LOOPER_STATE_ENTERED))
3892 binder_enqueue_thread_work_ilocked(
3893 thread, &death->work);
3894 else {
3895 binder_enqueue_work_ilocked(
3896 &death->work,
3897 &proc->todo);
3898 binder_wakeup_proc_ilocked(proc);
3899 }
3900 }
3901 binder_inner_proc_unlock(proc);
3902 } break;
3903
3904 default:
3905 pr_err("%d:%d unknown command %d\n",
3906 proc->pid, thread->pid, cmd);
3907 return -EINVAL;
3908 }
3909 *consumed = ptr - buffer;
3910 }
3911 return 0;
3912 }
3913
3914 static void binder_stat_br(struct binder_proc *proc,
3915 struct binder_thread *thread, uint32_t cmd)
3916 {
3917 trace_binder_return(cmd);
3918 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3919 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3920 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3921 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3922 }
3923 }
3924
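/*
 * Write one node refcount command (BR_INCREFS, BR_ACQUIRE, BR_RELEASE
 * or BR_DECREFS) together with the node's ptr/cookie pair into the
 * userspace read buffer and advance *ptrp past what was written.
 */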
3925 static int binder_put_node_cmd(struct binder_proc *proc,
3926 struct binder_thread *thread,
3927 void __user **ptrp,
3928 binder_uintptr_t node_ptr,
3929 binder_uintptr_t node_cookie,
3930 int node_debug_id,
3931 uint32_t cmd, const char *cmd_name)
3932 {
3933 void __user *ptr = *ptrp;
3934
3935 if (put_user(cmd, (uint32_t __user *)ptr))
3936 return -EFAULT;
3937 ptr += sizeof(uint32_t);
3938
3939 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3940 return -EFAULT;
3941 ptr += sizeof(binder_uintptr_t);
3942
3943 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3944 return -EFAULT;
3945 ptr += sizeof(binder_uintptr_t);
3946
3947 binder_stat_br(proc, thread, cmd);
3948 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3949 proc->pid, thread->pid, cmd_name, node_debug_id,
3950 (u64)node_ptr, (u64)node_cookie);
3951
3952 *ptrp = ptr;
3953 return 0;
3954 }
3955
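/*
 * Sleep until this thread has work, or, when do_proc_work is set, until
 * the proc has work a looper thread may take. The thread is parked on
 * proc->waiting_threads while it sleeps and removed again on wakeup;
 * freezer_do_not_count()/freezer_count() keep an idle binder thread
 * from blocking the freezer. Returns -ERESTARTSYS if interrupted by a
 * signal.
 */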
3956 static int binder_wait_for_work(struct binder_thread *thread,
3957 bool do_proc_work)
3958 {
3959 DEFINE_WAIT(wait);
3960 struct binder_proc *proc = thread->proc;
3961 int ret = 0;
3962
3963 freezer_do_not_count();
3964 binder_inner_proc_lock(proc);
3965 for (;;) {
3966 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3967 if (binder_has_work_ilocked(thread, do_proc_work))
3968 break;
3969 if (do_proc_work)
3970 list_add(&thread->waiting_thread_node,
3971 &proc->waiting_threads);
3972 binder_inner_proc_unlock(proc);
3973 schedule();
3974 binder_inner_proc_lock(proc);
3975 list_del_init(&thread->waiting_thread_node);
3976 if (signal_pending(current)) {
3977 ret = -ERESTARTSYS;
3978 break;
3979 }
3980 }
3981 finish_wait(&thread->wait, &wait);
3982 binder_inner_proc_unlock(proc);
3983 freezer_count();
3984
3985 return ret;
3986 }
3987
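/*
 * Fill the userspace read buffer with BR_* commands. A BR_NOOP is
 * written first when nothing has been consumed yet; work items are then
 * dequeued from thread->todo (or proc->todo if this thread may handle
 * process work) and translated into return commands until the buffer is
 * full or no work remains. Delivering a transaction ends the loop, so
 * at most one BR_TRANSACTION/BR_REPLY is returned per call.
 */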
3988 static int binder_thread_read(struct binder_proc *proc,
3989 struct binder_thread *thread,
3990 binder_uintptr_t binder_buffer, size_t size,
3991 binder_size_t *consumed, int non_block)
3992 {
3993 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3994 void __user *ptr = buffer + *consumed;
3995 void __user *end = buffer + size;
3996
3997 int ret = 0;
3998 int wait_for_proc_work;
3999
4000 if (*consumed == 0) {
4001 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4002 return -EFAULT;
4003 ptr += sizeof(uint32_t);
4004 }
4005
4006 retry:
4007 binder_inner_proc_lock(proc);
4008 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4009 binder_inner_proc_unlock(proc);
4010
4011 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4012
4013 trace_binder_wait_for_work(wait_for_proc_work,
4014 !!thread->transaction_stack,
4015 !binder_worklist_empty(proc, &thread->todo));
4016 if (wait_for_proc_work) {
4017 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4018 BINDER_LOOPER_STATE_ENTERED))) {
4019 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4020 proc->pid, thread->pid, thread->looper);
4021 wait_event_interruptible(binder_user_error_wait,
4022 binder_stop_on_user_error < 2);
4023 }
4024 binder_restore_priority(current, proc->default_priority);
4025 }
4026
4027 if (non_block) {
4028 if (!binder_has_work(thread, wait_for_proc_work))
4029 ret = -EAGAIN;
4030 } else {
4031 ret = binder_wait_for_work(thread, wait_for_proc_work);
4032 }
4033
4034 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4035
4036 if (ret)
4037 return ret;
4038
4039 while (1) {
4040 uint32_t cmd;
4041 struct binder_transaction_data tr;
4042 struct binder_work *w = NULL;
4043 struct list_head *list = NULL;
4044 struct binder_transaction *t = NULL;
4045 struct binder_thread *t_from;
4046
4047 binder_inner_proc_lock(proc);
4048 if (!binder_worklist_empty_ilocked(&thread->todo))
4049 list = &thread->todo;
4050 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4051 wait_for_proc_work)
4052 list = &proc->todo;
4053 else {
4054 binder_inner_proc_unlock(proc);
4055
4056 /* no data added */
4057 if (ptr - buffer == 4 && !thread->looper_need_return)
4058 goto retry;
4059 break;
4060 }
4061
4062 if (end - ptr < sizeof(tr) + 4) {
4063 binder_inner_proc_unlock(proc);
4064 break;
4065 }
4066 w = binder_dequeue_work_head_ilocked(list);
4067 if (binder_worklist_empty_ilocked(&thread->todo))
4068 thread->process_todo = false;
4069
4070 switch (w->type) {
4071 case BINDER_WORK_TRANSACTION: {
4072 binder_inner_proc_unlock(proc);
4073 t = container_of(w, struct binder_transaction, work);
4074 } break;
4075 case BINDER_WORK_RETURN_ERROR: {
4076 struct binder_error *e = container_of(
4077 w, struct binder_error, work);
4078
4079 WARN_ON(e->cmd == BR_OK);
4080 binder_inner_proc_unlock(proc);
4081 if (put_user(e->cmd, (uint32_t __user *)ptr))
4082 return -EFAULT;
4083 cmd = e->cmd;
e->cmd = BR_OK;
4084 ptr += sizeof(uint32_t);
4085
4086 binder_stat_br(proc, thread, cmd);
4087 } break;
4088 case BINDER_WORK_TRANSACTION_COMPLETE: {
4089 binder_inner_proc_unlock(proc);
4090 cmd = BR_TRANSACTION_COMPLETE;
4091 if (put_user(cmd, (uint32_t __user *)ptr))
4092 return -EFAULT;
4093 ptr += sizeof(uint32_t);
4094
4095 binder_stat_br(proc, thread, cmd);
4096 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4097 "%d:%d BR_TRANSACTION_COMPLETE\n",
4098 proc->pid, thread->pid);
4099 kfree(w);
4100 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4101 } break;
4102 case BINDER_WORK_NODE: {
4103 struct binder_node *node = container_of(w, struct binder_node, work);
4104 int strong, weak;
4105 binder_uintptr_t node_ptr = node->ptr;
4106 binder_uintptr_t node_cookie = node->cookie;
4107 int node_debug_id = node->debug_id;
4108 int has_weak_ref;
4109 int has_strong_ref;
4110 void __user *orig_ptr = ptr;
4111
4112 BUG_ON(proc != node->proc);
4113 strong = node->internal_strong_refs ||
4114 node->local_strong_refs;
4115 weak = !hlist_empty(&node->refs) ||
4116 node->local_weak_refs ||
4117 node->tmp_refs || strong;
4118 has_strong_ref = node->has_strong_ref;
4119 has_weak_ref = node->has_weak_ref;
4120
4121 if (weak && !has_weak_ref) {
4122 node->has_weak_ref = 1;
4123 node->pending_weak_ref = 1;
4124 node->local_weak_refs++;
4125 }
4126 if (strong && !has_strong_ref) {
4127 node->has_strong_ref = 1;
4128 node->pending_strong_ref = 1;
4129 node->local_strong_refs++;
4130 }
4131 if (!strong && has_strong_ref)
4132 node->has_strong_ref = 0;
4133 if (!weak && has_weak_ref)
4134 node->has_weak_ref = 0;
4135 if (!weak && !strong) {
4136 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4137 "%d:%d node %d u%016llx c%016llx deleted\n",
4138 proc->pid, thread->pid,
4139 node_debug_id,
4140 (u64)node_ptr,
4141 (u64)node_cookie);
4142 rb_erase(&node->rb_node, &proc->nodes);
4143 binder_inner_proc_unlock(proc);
4144 binder_node_lock(node);
4145 /*
4146 * Acquire the node lock before freeing the
4147 * node to serialize with other threads that
4148 * may have been holding the node lock while
4149 * decrementing this node (avoids race where
4150 * this thread frees while the other thread
4151 * is unlocking the node after the final
4152 * decrement)
4153 */
4154 binder_node_unlock(node);
4155 binder_free_node(node);
4156 } else
4157 binder_inner_proc_unlock(proc);
4158
4159 if (weak && !has_weak_ref)
4160 ret = binder_put_node_cmd(
4161 proc, thread, &ptr, node_ptr,
4162 node_cookie, node_debug_id,
4163 BR_INCREFS, "BR_INCREFS");
4164 if (!ret && strong && !has_strong_ref)
4165 ret = binder_put_node_cmd(
4166 proc, thread, &ptr, node_ptr,
4167 node_cookie, node_debug_id,
4168 BR_ACQUIRE, "BR_ACQUIRE");
4169 if (!ret && !strong && has_strong_ref)
4170 ret = binder_put_node_cmd(
4171 proc, thread, &ptr, node_ptr,
4172 node_cookie, node_debug_id,
4173 BR_RELEASE, "BR_RELEASE");
4174 if (!ret && !weak && has_weak_ref)
4175 ret = binder_put_node_cmd(
4176 proc, thread, &ptr, node_ptr,
4177 node_cookie, node_debug_id,
4178 BR_DECREFS, "BR_DECREFS");
4179 if (orig_ptr == ptr)
4180 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4181 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4182 proc->pid, thread->pid,
4183 node_debug_id,
4184 (u64)node_ptr,
4185 (u64)node_cookie);
4186 if (ret)
4187 return ret;
4188 } break;
4189 case BINDER_WORK_DEAD_BINDER:
4190 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4191 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4192 struct binder_ref_death *death;
4193 uint32_t cmd;
4194 binder_uintptr_t cookie;
4195
4196 death = container_of(w, struct binder_ref_death, work);
4197 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4198 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4199 else
4200 cmd = BR_DEAD_BINDER;
4201 cookie = death->cookie;
4202
4203 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4204 "%d:%d %s %016llx\n",
4205 proc->pid, thread->pid,
4206 cmd == BR_DEAD_BINDER ?
4207 "BR_DEAD_BINDER" :
4208 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4209 (u64)cookie);
4210 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4211 binder_inner_proc_unlock(proc);
4212 kfree(death);
4213 binder_stats_deleted(BINDER_STAT_DEATH);
4214 } else {
4215 binder_enqueue_work_ilocked(
4216 w, &proc->delivered_death);
4217 binder_inner_proc_unlock(proc);
4218 }
4219 if (put_user(cmd, (uint32_t __user *)ptr))
4220 return -EFAULT;
4221 ptr += sizeof(uint32_t);
4222 if (put_user(cookie,
4223 (binder_uintptr_t __user *)ptr))
4224 return -EFAULT;
4225 ptr += sizeof(binder_uintptr_t);
4226 binder_stat_br(proc, thread, cmd);
4227 if (cmd == BR_DEAD_BINDER)
4228 goto done; /* DEAD_BINDER notifications can cause transactions */
4229 } break;
4230 }
4231
4232 if (!t)
4233 continue;
4234
4235 BUG_ON(t->buffer == NULL);
4236 if (t->buffer->target_node) {
4237 struct binder_node *target_node = t->buffer->target_node;
4238 struct binder_priority node_prio;
4239
4240 tr.target.ptr = target_node->ptr;
4241 tr.cookie = target_node->cookie;
4242 node_prio.sched_policy = target_node->sched_policy;
4243 node_prio.prio = target_node->min_priority;
4244 binder_transaction_priority(current, t, node_prio,
4245 target_node->inherit_rt);
4246 cmd = BR_TRANSACTION;
4247 } else {
4248 tr.target.ptr = 0;
4249 tr.cookie = 0;
4250 cmd = BR_REPLY;
4251 }
4252 tr.code = t->code;
4253 tr.flags = t->flags;
4254 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4255
4256 t_from = binder_get_txn_from(t);
4257 if (t_from) {
4258 struct task_struct *sender = t_from->proc->tsk;
4259
4260 tr.sender_pid = task_tgid_nr_ns(sender,
4261 task_active_pid_ns(current));
4262 } else {
4263 tr.sender_pid = 0;
4264 }
4265
4266 tr.data_size = t->buffer->data_size;
4267 tr.offsets_size = t->buffer->offsets_size;
4268 tr.data.ptr.buffer = (binder_uintptr_t)
4269 ((uintptr_t)t->buffer->data +
4270 binder_alloc_get_user_buffer_offset(&proc->alloc));
4271 tr.data.ptr.offsets = tr.data.ptr.buffer +
4272 ALIGN(t->buffer->data_size,
4273 sizeof(void *));
4274
4275 if (put_user(cmd, (uint32_t __user *)ptr)) {
4276 if (t_from)
4277 binder_thread_dec_tmpref(t_from);
4278
4279 binder_cleanup_transaction(t, "put_user failed",
4280 BR_FAILED_REPLY);
4281
4282 return -EFAULT;
4283 }
4284 ptr += sizeof(uint32_t);
4285 if (copy_to_user(ptr, &tr, sizeof(tr))) {
4286 if (t_from)
4287 binder_thread_dec_tmpref(t_from);
4288
4289 binder_cleanup_transaction(t, "copy_to_user failed",
4290 BR_FAILED_REPLY);
4291
4292 return -EFAULT;
4293 }
4294 ptr += sizeof(tr);
4295
4296 trace_binder_transaction_received(t);
4297 binder_stat_br(proc, thread, cmd);
4298 binder_debug(BINDER_DEBUG_TRANSACTION,
4299 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4300 proc->pid, thread->pid,
4301 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4302 "BR_REPLY",
4303 t->debug_id, t_from ? t_from->proc->pid : 0,
4304 t_from ? t_from->pid : 0, cmd,
4305 t->buffer->data_size, t->buffer->offsets_size,
4306 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
4307
4308 if (t_from)
4309 binder_thread_dec_tmpref(t_from);
4310 t->buffer->allow_user_free = 1;
4311 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
4312 binder_inner_proc_lock(thread->proc);
4313 t->to_parent = thread->transaction_stack;
4314 t->to_thread = thread;
4315 thread->transaction_stack = t;
4316 binder_inner_proc_unlock(thread->proc);
4317 } else {
4318 binder_free_transaction(t);
4319 }
4320 break;
4321 }
4322
4323 done:
4324
4325 *consumed = ptr - buffer;
4326 binder_inner_proc_lock(proc);
4327 if (proc->requested_threads == 0 &&
4328 list_empty(&thread->proc->waiting_threads) &&
4329 proc->requested_threads_started < proc->max_threads &&
4330 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4331 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
4332 /* spawn a new thread if we leave this out */) {
4333 proc->requested_threads++;
4334 binder_inner_proc_unlock(proc);
4335 binder_debug(BINDER_DEBUG_THREADS,
4336 "%d:%d BR_SPAWN_LOOPER\n",
4337 proc->pid, thread->pid);
4338 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4339 return -EFAULT;
4340 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4341 } else
4342 binder_inner_proc_unlock(proc);
4343 return 0;
4344 }
4345
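/*
 * Drain a work list that can no longer be delivered because its thread
 * or proc is going away: undelivered transactions are cleaned up with
 * BR_DEAD_REPLY, transaction-complete and death-notification items are
 * freed, and anything unexpected is only logged.
 */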
4346 static void binder_release_work(struct binder_proc *proc,
4347 struct list_head *list)
4348 {
4349 struct binder_work *w;
4350
4351 while (1) {
4352 w = binder_dequeue_work_head(proc, list);
4353 if (!w)
4354 return;
4355
4356 switch (w->type) {
4357 case BINDER_WORK_TRANSACTION: {
4358 struct binder_transaction *t;
4359
4360 t = container_of(w, struct binder_transaction, work);
4361
4362 binder_cleanup_transaction(t, "process died.",
4363 BR_DEAD_REPLY);
4364 } break;
4365 case BINDER_WORK_RETURN_ERROR: {
4366 struct binder_error *e = container_of(
4367 w, struct binder_error, work);
4368
4369 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4370 "undelivered TRANSACTION_ERROR: %u\n",
4371 e->cmd);
4372 } break;
4373 case BINDER_WORK_TRANSACTION_COMPLETE: {
4374 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4375 "undelivered TRANSACTION_COMPLETE\n");
4376 kfree(w);
4377 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4378 } break;
4379 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4380 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4381 struct binder_ref_death *death;
4382
4383 death = container_of(w, struct binder_ref_death, work);
4384 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4385 "undelivered death notification, %016llx\n",
4386 (u64)death->cookie);
4387 kfree(death);
4388 binder_stats_deleted(BINDER_STAT_DEATH);
4389 } break;
4390 default:
4391 pr_err("unexpected work type, %d, not freed\n",
4392 w->type);
4393 break;
4394 }
4395 }
4396
4397 }
4398
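/*
 * Look up the binder_thread for the current task in proc->threads, an
 * rbtree keyed by pid. If it is missing and the caller supplied a
 * preallocated new_thread, initialize and insert that; otherwise return
 * NULL so binder_get_thread() can allocate outside the lock and retry.
 */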
4399 static struct binder_thread *binder_get_thread_ilocked(
4400 struct binder_proc *proc, struct binder_thread *new_thread)
4401 {
4402 struct binder_thread *thread = NULL;
4403 struct rb_node *parent = NULL;
4404 struct rb_node **p = &proc->threads.rb_node;
4405
4406 while (*p) {
4407 parent = *p;
4408 thread = rb_entry(parent, struct binder_thread, rb_node);
4409
4410 if (current->pid < thread->pid)
4411 p = &(*p)->rb_left;
4412 else if (current->pid > thread->pid)
4413 p = &(*p)->rb_right;
4414 else
4415 return thread;
4416 }
4417 if (!new_thread)
4418 return NULL;
4419 thread = new_thread;
4420 binder_stats_created(BINDER_STAT_THREAD);
4421 thread->proc = proc;
4422 thread->pid = current->pid;
4423 get_task_struct(current);
4424 thread->task = current;
4425 atomic_set(&thread->tmp_ref, 0);
4426 init_waitqueue_head(&thread->wait);
4427 INIT_LIST_HEAD(&thread->todo);
4428 rb_link_node(&thread->rb_node, parent, p);
4429 rb_insert_color(&thread->rb_node, &proc->threads);
4430 thread->looper_need_return = true;
4431 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4432 thread->return_error.cmd = BR_OK;
4433 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4434 thread->reply_error.cmd = BR_OK;
4435 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4436 return thread;
4437 }
4438
4439 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4440 {
4441 struct binder_thread *thread;
4442 struct binder_thread *new_thread;
4443
4444 binder_inner_proc_lock(proc);
4445 thread = binder_get_thread_ilocked(proc, NULL);
4446 binder_inner_proc_unlock(proc);
4447 if (!thread) {
4448 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4449 if (new_thread == NULL)
4450 return NULL;
4451 binder_inner_proc_lock(proc);
4452 thread = binder_get_thread_ilocked(proc, new_thread);
4453 binder_inner_proc_unlock(proc);
4454 if (thread != new_thread)
4455 kfree(new_thread);
4456 }
4457 return thread;
4458 }
4459
4460 static void binder_free_proc(struct binder_proc *proc)
4461 {
4462 BUG_ON(!list_empty(&proc->todo));
4463 BUG_ON(!list_empty(&proc->delivered_death));
4464 binder_alloc_deferred_release(&proc->alloc);
4465 put_task_struct(proc->tsk);
4466 binder_stats_deleted(BINDER_STAT_PROC);
4467 kfree(proc);
4468 }
4469
4470 static void binder_free_thread(struct binder_thread *thread)
4471 {
4472 BUG_ON(!list_empty(&thread->todo));
4473 binder_stats_deleted(BINDER_STAT_THREAD);
4474 binder_proc_dec_tmpref(thread->proc);
4475 put_task_struct(thread->task);
4476 kfree(thread);
4477 }
4478
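/*
 * Tear down a binder_thread: remove it from proc->threads, mark it dead
 * and walk its transaction stack. A transaction waiting on a reply from
 * this thread is failed with BR_DEAD_REPLY; transactions it originated
 * simply lose their from pointer. Pending work on thread->todo is
 * released and the temporary reference is dropped, freeing the thread
 * once the last user is gone. Returns the number of transactions that
 * were still active.
 */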
4479 static int binder_thread_release(struct binder_proc *proc,
4480 struct binder_thread *thread)
4481 {
4482 struct binder_transaction *t;
4483 struct binder_transaction *send_reply = NULL;
4484 int active_transactions = 0;
4485 struct binder_transaction *last_t = NULL;
4486
4487 binder_inner_proc_lock(thread->proc);
4488 /*
4489 * take a ref on the proc so it survives
4490 * after we remove this thread from proc->threads.
4491 * The corresponding dec is when we actually
4492 * free the thread in binder_free_thread()
4493 */
4494 proc->tmp_ref++;
4495 /*
4496 * take a ref on this thread to ensure it
4497 * survives while we are releasing it
4498 */
4499 atomic_inc(&thread->tmp_ref);
4500 rb_erase(&thread->rb_node, &proc->threads);
4501 t = thread->transaction_stack;
4502 if (t) {
4503 spin_lock(&t->lock);
4504 if (t->to_thread == thread)
4505 send_reply = t;
4506 }
4507 thread->is_dead = true;
4508
4509 while (t) {
4510 last_t = t;
4511 active_transactions++;
4512 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4513 "release %d:%d transaction %d %s, still active\n",
4514 proc->pid, thread->pid,
4515 t->debug_id,
4516 (t->to_thread == thread) ? "in" : "out");
4517
4518 if (t->to_thread == thread) {
4519 t->to_proc = NULL;
4520 t->to_thread = NULL;
4521 if (t->buffer) {
4522 t->buffer->transaction = NULL;
4523 t->buffer = NULL;
4524 }
4525 t = t->to_parent;
4526 } else if (t->from == thread) {
4527 t->from = NULL;
4528 t = t->from_parent;
4529 } else
4530 BUG();
4531 spin_unlock(&last_t->lock);
4532 if (t)
4533 spin_lock(&t->lock);
4534 }
4535 binder_inner_proc_unlock(thread->proc);
4536
4537 if (send_reply)
4538 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4539 binder_release_work(proc, &thread->todo);
4540 binder_thread_dec_tmpref(thread);
4541 return active_transactions;
4542 }
4543
4544 static unsigned int binder_poll(struct file *filp,
4545 struct poll_table_struct *wait)
4546 {
4547 struct binder_proc *proc = filp->private_data;
4548 struct binder_thread *thread = NULL;
4549 bool wait_for_proc_work;
4550
4551 thread = binder_get_thread(proc);
if (!thread)
return POLLERR;
4552
4553 binder_inner_proc_lock(thread->proc);
4554 thread->looper |= BINDER_LOOPER_STATE_POLL;
4555 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4556
4557 binder_inner_proc_unlock(thread->proc);
4558
4559 poll_wait(filp, &thread->wait, wait);
4560
4561 if (binder_has_work(thread, wait_for_proc_work))
4562 return POLLIN;
4563
4564 return 0;
4565 }
4566
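/*
 * Handle BINDER_WRITE_READ: copy a struct binder_write_read from
 * userspace, run binder_thread_write() over the write buffer and
 * binder_thread_read() over the read buffer, then copy the structure
 * back so the caller can see how much of each buffer was consumed.
 *
 * A rough sketch of the matching userspace call (illustrative only;
 * names like binder_fd, out_buf and in_buf are placeholders, libbinder
 * normally drives this ioctl):
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)(uintptr_t)out_buf,
 *		.write_size   = out_len,
 *		.read_buffer  = (binder_uintptr_t)(uintptr_t)in_buf,
 *		.read_size    = sizeof(in_buf),
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */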
4567 static int binder_ioctl_write_read(struct file *filp,
4568 unsigned int cmd, unsigned long arg,
4569 struct binder_thread *thread)
4570 {
4571 int ret = 0;
4572 struct binder_proc *proc = filp->private_data;
4573 unsigned int size = _IOC_SIZE(cmd);
4574 void __user *ubuf = (void __user *)arg;
4575 struct binder_write_read bwr;
4576
4577 if (size != sizeof(struct binder_write_read)) {
4578 ret = -EINVAL;
4579 goto out;
4580 }
4581 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4582 ret = -EFAULT;
4583 goto out;
4584 }
4585 binder_debug(BINDER_DEBUG_READ_WRITE,
4586 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4587 proc->pid, thread->pid,
4588 (u64)bwr.write_size, (u64)bwr.write_buffer,
4589 (u64)bwr.read_size, (u64)bwr.read_buffer);
4590
4591 if (bwr.write_size > 0) {
4592 ret = binder_thread_write(proc, thread,
4593 bwr.write_buffer,
4594 bwr.write_size,
4595 &bwr.write_consumed);
4596 trace_binder_write_done(ret);
4597 if (ret < 0) {
4598 bwr.read_consumed = 0;
4599 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4600 ret = -EFAULT;
4601 goto out;
4602 }
4603 }
4604 if (bwr.read_size > 0) {
4605 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4606 bwr.read_size,
4607 &bwr.read_consumed,
4608 filp->f_flags & O_NONBLOCK);
4609 trace_binder_read_done(ret);
4610 binder_inner_proc_lock(proc);
4611 if (!binder_worklist_empty_ilocked(&proc->todo))
4612 binder_wakeup_proc_ilocked(proc);
4613 binder_inner_proc_unlock(proc);
4614 if (ret < 0) {
4615 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4616 ret = -EFAULT;
4617 goto out;
4618 }
4619 }
4620 binder_debug(BINDER_DEBUG_READ_WRITE,
4621 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4622 proc->pid, thread->pid,
4623 (u64)bwr.write_consumed, (u64)bwr.write_size,
4624 (u64)bwr.read_consumed, (u64)bwr.read_size);
4625 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4626 ret = -EFAULT;
4627 goto out;
4628 }
4629 out:
4630 return ret;
4631 }
4632
4633 static int binder_ioctl_set_ctx_mgr(struct file *filp)
4634 {
4635 int ret = 0;
4636 struct binder_proc *proc = filp->private_data;
4637 struct binder_context *context = proc->context;
4638 struct binder_node *new_node;
4639 kuid_t curr_euid = current_euid();
4640
4641 mutex_lock(&context->context_mgr_node_lock);
4642 if (context->binder_context_mgr_node) {
4643 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4644 ret = -EBUSY;
4645 goto out;
4646 }
4647 ret = security_binder_set_context_mgr(proc->tsk);
4648 if (ret < 0)
4649 goto out;
4650 if (uid_valid(context->binder_context_mgr_uid)) {
4651 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4652 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4653 from_kuid(&init_user_ns, curr_euid),
4654 from_kuid(&init_user_ns,
4655 context->binder_context_mgr_uid));
4656 ret = -EPERM;
4657 goto out;
4658 }
4659 } else {
4660 context->binder_context_mgr_uid = curr_euid;
4661 }
4662 new_node = binder_new_node(proc, NULL);
4663 if (!new_node) {
4664 ret = -ENOMEM;
4665 goto out;
4666 }
4667 binder_node_lock(new_node);
4668 new_node->local_weak_refs++;
4669 new_node->local_strong_refs++;
4670 new_node->has_strong_ref = 1;
4671 new_node->has_weak_ref = 1;
4672 context->binder_context_mgr_node = new_node;
4673 binder_node_unlock(new_node);
4674 binder_put_node(new_node);
4675 out:
4676 mutex_unlock(&context->context_mgr_node_lock);
4677 return ret;
4678 }
4679
4680 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4681 struct binder_node_debug_info *info) {
4682 struct rb_node *n;
4683 binder_uintptr_t ptr = info->ptr;
4684
4685 memset(info, 0, sizeof(*info));
4686
4687 binder_inner_proc_lock(proc);
4688 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4689 struct binder_node *node = rb_entry(n, struct binder_node,
4690 rb_node);
4691 if (node->ptr > ptr) {
4692 info->ptr = node->ptr;
4693 info->cookie = node->cookie;
4694 info->has_strong_ref = node->has_strong_ref;
4695 info->has_weak_ref = node->has_weak_ref;
4696 break;
4697 }
4698 }
4699 binder_inner_proc_unlock(proc);
4700
4701 return 0;
4702 }
4703
4704 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4705 {
4706 int ret;
4707 struct binder_proc *proc = filp->private_data;
4708 struct binder_thread *thread;
4709 unsigned int size = _IOC_SIZE(cmd);
4710 void __user *ubuf = (void __user *)arg;
4711
4712 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4713 proc->pid, current->pid, cmd, arg);*/
4714
4715 binder_selftest_alloc(&proc->alloc);
4716
4717 trace_binder_ioctl(cmd, arg);
4718
4719 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4720 if (ret)
4721 goto err_unlocked;
4722
4723 thread = binder_get_thread(proc);
4724 if (thread == NULL) {
4725 ret = -ENOMEM;
4726 goto err;
4727 }
4728
4729 switch (cmd) {
4730 case BINDER_WRITE_READ:
4731 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4732 if (ret)
4733 goto err;
4734 break;
4735 case BINDER_SET_MAX_THREADS: {
4736 int max_threads;
4737
4738 if (copy_from_user(&max_threads, ubuf,
4739 sizeof(max_threads))) {
4740 ret = -EINVAL;
4741 goto err;
4742 }
4743 binder_inner_proc_lock(proc);
4744 proc->max_threads = max_threads;
4745 binder_inner_proc_unlock(proc);
4746 break;
4747 }
4748 case BINDER_SET_CONTEXT_MGR:
4749 ret = binder_ioctl_set_ctx_mgr(filp);
4750 if (ret)
4751 goto err;
4752 break;
4753 case BINDER_THREAD_EXIT:
4754 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
4755 proc->pid, thread->pid);
4756 binder_thread_release(proc, thread);
4757 thread = NULL;
4758 break;
4759 case BINDER_VERSION: {
4760 struct binder_version __user *ver = ubuf;
4761
4762 if (size != sizeof(struct binder_version)) {
4763 ret = -EINVAL;
4764 goto err;
4765 }
4766 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4767 &ver->protocol_version)) {
4768 ret = -EINVAL;
4769 goto err;
4770 }
4771 break;
4772 }
4773 case BINDER_GET_NODE_DEBUG_INFO: {
4774 struct binder_node_debug_info info;
4775
4776 if (copy_from_user(&info, ubuf, sizeof(info))) {
4777 ret = -EFAULT;
4778 goto err;
4779 }
4780
4781 ret = binder_ioctl_get_node_debug_info(proc, &info);
4782 if (ret < 0)
4783 goto err;
4784
4785 if (copy_to_user(ubuf, &info, sizeof(info))) {
4786 ret = -EFAULT;
4787 goto err;
4788 }
4789 break;
4790 }
4791 default:
4792 ret = -EINVAL;
4793 goto err;
4794 }
4795 ret = 0;
4796 err:
4797 if (thread)
4798 thread->looper_need_return = false;
4799 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4800 if (ret && ret != -ERESTARTSYS)
4801 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
4802 err_unlocked:
4803 trace_binder_ioctl_done(ret);
4804 return ret;
4805 }
4806
4807 static void binder_vma_open(struct vm_area_struct *vma)
4808 {
4809 struct binder_proc *proc = vma->vm_private_data;
4810
4811 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4812 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4813 proc->pid, vma->vm_start, vma->vm_end,
4814 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4815 (unsigned long)pgprot_val(vma->vm_page_prot));
4816 }
4817
4818 static void binder_vma_close(struct vm_area_struct *vma)
4819 {
4820 struct binder_proc *proc = vma->vm_private_data;
4821
4822 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4823 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4824 proc->pid, vma->vm_start, vma->vm_end,
4825 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4826 (unsigned long)pgprot_val(vma->vm_page_prot));
4827 binder_alloc_vma_close(&proc->alloc);
4828 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
4829 }
4830
4831 static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4832 {
4833 return VM_FAULT_SIGBUS;
4834 }
4835
4836 static struct vm_operations_struct binder_vm_ops = {
4837 .open = binder_vma_open,
4838 .close = binder_vma_close,
4839 .fault = binder_vm_fault,
4840 };
4841
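/*
 * Set up the binder buffer mapping for the caller: the area is clamped
 * to 4MB, mappings with FORBIDDEN_MMAP_FLAGS are rejected, VM_DONTCOPY
 * is forced and VM_MAYWRITE cleared, and the VMA is handed to the
 * allocator via binder_alloc_mmap_handler(). The task's files struct is
 * pinned here and released when the mapping goes away.
 */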
4842 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4843 {
4844 int ret;
4845 struct binder_proc *proc = filp->private_data;
4846 const char *failure_string;
4847
4848 if (proc->tsk != current->group_leader)
4849 return -EINVAL;
4850
4851 if ((vma->vm_end - vma->vm_start) > SZ_4M)
4852 vma->vm_end = vma->vm_start + SZ_4M;
4853
4854 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4855 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4856 __func__, proc->pid, vma->vm_start, vma->vm_end,
4857 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4858 (unsigned long)pgprot_val(vma->vm_page_prot));
4859
4860 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4861 ret = -EPERM;
4862 failure_string = "bad vm_flags";
4863 goto err_bad_arg;
4864 }
4865 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
4866 vma->vm_ops = &binder_vm_ops;
4867 vma->vm_private_data = proc;
4868
4869 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
4870 if (ret)
4871 return ret;
4872 proc->files = get_files_struct(current);
4873 return 0;
4874
4875 err_bad_arg:
4876 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
4877 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
4878 return ret;
4879 }
4880
4881 static int binder_open(struct inode *nodp, struct file *filp)
4882 {
4883 struct binder_proc *proc;
4884 struct binder_device *binder_dev;
4885
4886 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
4887 current->group_leader->pid, current->pid);
4888
4889 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
4890 if (proc == NULL)
4891 return -ENOMEM;
4892 spin_lock_init(&proc->inner_lock);
4893 spin_lock_init(&proc->outer_lock);
4894 get_task_struct(current->group_leader);
4895 proc->tsk = current->group_leader;
4896 INIT_LIST_HEAD(&proc->todo);
4897 if (binder_supported_policy(current->policy)) {
4898 proc->default_priority.sched_policy = current->policy;
4899 proc->default_priority.prio = current->normal_prio;
4900 } else {
4901 proc->default_priority.sched_policy = SCHED_NORMAL;
4902 proc->default_priority.prio = NICE_TO_PRIO(0);
4903 }
4904
4905 binder_dev = container_of(filp->private_data, struct binder_device,
4906 miscdev);
4907 proc->context = &binder_dev->context;
4908 binder_alloc_init(&proc->alloc);
4909
4910 binder_stats_created(BINDER_STAT_PROC);
4911 proc->pid = current->group_leader->pid;
4912 INIT_LIST_HEAD(&proc->delivered_death);
4913 INIT_LIST_HEAD(&proc->waiting_threads);
4914 filp->private_data = proc;
4915
4916 mutex_lock(&binder_procs_lock);
4917 hlist_add_head(&proc->proc_node, &binder_procs);
4918 mutex_unlock(&binder_procs_lock);
4919
4920 if (binder_debugfs_dir_entry_proc) {
4921 char strbuf[11];
4922
4923 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
4924 /*
4925 * proc debug entries are shared between contexts, so
4926 * this will fail if the process tries to open the driver
4927 * again with a different context. The printing code will
4928 * print all contexts that a given PID has anyway, so this
4929 * is not a problem.
4930 */
4931 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
4932 binder_debugfs_dir_entry_proc,
4933 (void *)(unsigned long)proc->pid,
4934 &binder_proc_fops);
4935 }
4936
4937 return 0;
4938 }
4939
4940 static int binder_flush(struct file *filp, fl_owner_t id)
4941 {
4942 struct binder_proc *proc = filp->private_data;
4943
4944 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
4945
4946 return 0;
4947 }
4948
4949 static void binder_deferred_flush(struct binder_proc *proc)
4950 {
4951 struct rb_node *n;
4952 int wake_count = 0;
4953
4954 binder_inner_proc_lock(proc);
4955 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
4956 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
4957
4958 thread->looper_need_return = true;
4959 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
4960 wake_up_interruptible(&thread->wait);
4961 wake_count++;
4962 }
4963 }
4964 binder_inner_proc_unlock(proc);
4965
4966 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4967 "binder_flush: %d woke %d threads\n", proc->pid,
4968 wake_count);
4969 }
4970
4971 static int binder_release(struct inode *nodp, struct file *filp)
4972 {
4973 struct binder_proc *proc = filp->private_data;
4974
4975 debugfs_remove(proc->debugfs_entry);
4976 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
4977
4978 return 0;
4979 }
4980
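/*
 * Release a node whose owning proc is going away. If nothing references
 * it any more it is freed immediately; otherwise it is moved onto the
 * global dead-nodes list and a BINDER_WORK_DEAD_BINDER item is queued
 * for every ref that registered a death notification. Returns the
 * accumulated incoming reference count for the caller's statistics.
 */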
4981 static int binder_node_release(struct binder_node *node, int refs)
4982 {
4983 struct binder_ref *ref;
4984 int death = 0;
4985 struct binder_proc *proc = node->proc;
4986
4987 binder_release_work(proc, &node->async_todo);
4988
4989 binder_node_lock(node);
4990 binder_inner_proc_lock(proc);
4991 binder_dequeue_work_ilocked(&node->work);
4992 /*
4993 * The caller must have taken a temporary ref on the node.
4994 */
4995 BUG_ON(!node->tmp_refs);
4996 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
4997 binder_inner_proc_unlock(proc);
4998 binder_node_unlock(node);
4999 binder_free_node(node);
5000
5001 return refs;
5002 }
5003
5004 node->proc = NULL;
5005 node->local_strong_refs = 0;
5006 node->local_weak_refs = 0;
5007 binder_inner_proc_unlock(proc);
5008
5009 spin_lock(&binder_dead_nodes_lock);
5010 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5011 spin_unlock(&binder_dead_nodes_lock);
5012
5013 hlist_for_each_entry(ref, &node->refs, node_entry) {
5014 refs++;
5015 /*
5016 * Need the node lock to synchronize
5017 * with new notification requests and the
5018 * inner lock to synchronize with queued
5019 * death notifications.
5020 */
5021 binder_inner_proc_lock(ref->proc);
5022 if (!ref->death) {
5023 binder_inner_proc_unlock(ref->proc);
5024 continue;
5025 }
5026
5027 death++;
5028
5029 BUG_ON(!list_empty(&ref->death->work.entry));
5030 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5031 binder_enqueue_work_ilocked(&ref->death->work,
5032 &ref->proc->todo);
5033 binder_wakeup_proc_ilocked(ref->proc);
5034 binder_inner_proc_unlock(ref->proc);
5035 }
5036
5037 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5038 "node %d now dead, refs %d, death %d\n",
5039 node->debug_id, refs, death);
5040 binder_node_unlock(node);
5041 binder_put_node(node);
5042
5043 return refs;
5044 }
5045
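/*
 * Final teardown of a binder_proc after userspace has closed the device:
 * unhook it from binder_procs, drop the context manager node if this
 * proc owned it, release every thread, node and ref it still holds, then
 * flush undelivered work before dropping the temporary proc reference
 * that keeps it alive during teardown.
 */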
5046 static void binder_deferred_release(struct binder_proc *proc)
5047 {
5048 struct binder_context *context = proc->context;
5049 struct rb_node *n;
5050 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5051
5052 BUG_ON(proc->files);
5053
5054 mutex_lock(&binder_procs_lock);
5055 hlist_del(&proc->proc_node);
5056 mutex_unlock(&binder_procs_lock);
5057
5058 mutex_lock(&context->context_mgr_node_lock);
5059 if (context->binder_context_mgr_node &&
5060 context->binder_context_mgr_node->proc == proc) {
5061 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5062 "%s: %d context_mgr_node gone\n",
5063 __func__, proc->pid);
5064 context->binder_context_mgr_node = NULL;
5065 }
5066 mutex_unlock(&context->context_mgr_node_lock);
5067 binder_inner_proc_lock(proc);
5068 /*
5069 * Make sure proc stays alive after we
5070 * remove all the threads
5071 */
5072 proc->tmp_ref++;
5073
5074 proc->is_dead = true;
5075 threads = 0;
5076 active_transactions = 0;
5077 while ((n = rb_first(&proc->threads))) {
5078 struct binder_thread *thread;
5079
5080 thread = rb_entry(n, struct binder_thread, rb_node);
5081 binder_inner_proc_unlock(proc);
5082 threads++;
5083 active_transactions += binder_thread_release(proc, thread);
5084 binder_inner_proc_lock(proc);
5085 }
5086
5087 nodes = 0;
5088 incoming_refs = 0;
5089 while ((n = rb_first(&proc->nodes))) {
5090 struct binder_node *node;
5091
5092 node = rb_entry(n, struct binder_node, rb_node);
5093 nodes++;
5094 /*
5095 * take a temporary ref on the node before
5096 * calling binder_node_release() which will either
5097 * kfree() the node or call binder_put_node()
5098 */
5099 binder_inc_node_tmpref_ilocked(node);
5100 rb_erase(&node->rb_node, &proc->nodes);
5101 binder_inner_proc_unlock(proc);
5102 incoming_refs = binder_node_release(node, incoming_refs);
5103 binder_inner_proc_lock(proc);
5104 }
5105 binder_inner_proc_unlock(proc);
5106
5107 outgoing_refs = 0;
5108 binder_proc_lock(proc);
5109 while ((n = rb_first(&proc->refs_by_desc))) {
5110 struct binder_ref *ref;
5111
5112 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5113 outgoing_refs++;
5114 binder_cleanup_ref_olocked(ref);
5115 binder_proc_unlock(proc);
5116 binder_free_ref(ref);
5117 binder_proc_lock(proc);
5118 }
5119 binder_proc_unlock(proc);
5120
5121 binder_release_work(proc, &proc->todo);
5122 binder_release_work(proc, &proc->delivered_death);
5123
5124 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5125 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5126 __func__, proc->pid, threads, nodes, incoming_refs,
5127 outgoing_refs, active_transactions);
5128
5129 binder_proc_dec_tmpref(proc);
5130 }
5131
5132 static void binder_deferred_func(struct work_struct *work)
5133 {
5134 struct binder_proc *proc;
5135 struct files_struct *files;
5136
5137 int defer;
5138
5139 do {
5140 mutex_lock(&binder_deferred_lock);
5141 if (!hlist_empty(&binder_deferred_list)) {
5142 proc = hlist_entry(binder_deferred_list.first,
5143 struct binder_proc, deferred_work_node);
5144 hlist_del_init(&proc->deferred_work_node);
5145 defer = proc->deferred_work;
5146 proc->deferred_work = 0;
5147 } else {
5148 proc = NULL;
5149 defer = 0;
5150 }
5151 mutex_unlock(&binder_deferred_lock);
5152
5153 files = NULL;
5154 if (defer & BINDER_DEFERRED_PUT_FILES) {
5155 files = proc->files;
5156 if (files)
5157 proc->files = NULL;
5158 }
5159
5160 if (defer & BINDER_DEFERRED_FLUSH)
5161 binder_deferred_flush(proc);
5162
5163 if (defer & BINDER_DEFERRED_RELEASE)
5164 binder_deferred_release(proc); /* frees proc */
5165
5166 if (files)
5167 put_files_struct(files);
5168 } while (proc);
5169 }
5170 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5171
5172 static void
5173 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5174 {
5175 mutex_lock(&binder_deferred_lock);
5176 proc->deferred_work |= defer;
5177 if (hlist_unhashed(&proc->deferred_work_node)) {
5178 hlist_add_head(&proc->deferred_work_node,
5179 &binder_deferred_list);
5180 queue_work(binder_deferred_workqueue, &binder_deferred_work);
5181 }
5182 mutex_unlock(&binder_deferred_lock);
5183 }
5184
5185 static void print_binder_transaction_ilocked(struct seq_file *m,
5186 struct binder_proc *proc,
5187 const char *prefix,
5188 struct binder_transaction *t)
5189 {
5190 struct binder_proc *to_proc;
5191 struct binder_buffer *buffer = t->buffer;
5192
5193 spin_lock(&t->lock);
5194 to_proc = t->to_proc;
5195 seq_printf(m,
5196 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
5197 prefix, t->debug_id, t,
5198 t->from ? t->from->proc->pid : 0,
5199 t->from ? t->from->pid : 0,
5200 to_proc ? to_proc->pid : 0,
5201 t->to_thread ? t->to_thread->pid : 0,
5202 t->code, t->flags, t->priority.sched_policy,
5203 t->priority.prio, t->need_reply);
5204 spin_unlock(&t->lock);
5205
5206 if (proc != to_proc) {
5207 /*
5208 * Can only safely deref buffer if we are holding the
5209 * correct proc inner lock for this node
5210 */
5211 seq_puts(m, "\n");
5212 return;
5213 }
5214
5215 if (buffer == NULL) {
5216 seq_puts(m, " buffer free\n");
5217 return;
5218 }
5219 if (buffer->target_node)
5220 seq_printf(m, " node %d", buffer->target_node->debug_id);
5221 seq_printf(m, " size %zd:%zd data %p\n",
5222 buffer->data_size, buffer->offsets_size,
5223 buffer->data);
5224 }
5225
5226 static void print_binder_work_ilocked(struct seq_file *m,
5227 struct binder_proc *proc,
5228 const char *prefix,
5229 const char *transaction_prefix,
5230 struct binder_work *w)
5231 {
5232 struct binder_node *node;
5233 struct binder_transaction *t;
5234
5235 switch (w->type) {
5236 case BINDER_WORK_TRANSACTION:
5237 t = container_of(w, struct binder_transaction, work);
5238 print_binder_transaction_ilocked(
5239 m, proc, transaction_prefix, t);
5240 break;
5241 case BINDER_WORK_RETURN_ERROR: {
5242 struct binder_error *e = container_of(
5243 w, struct binder_error, work);
5244
5245 seq_printf(m, "%stransaction error: %u\n",
5246 prefix, e->cmd);
5247 } break;
5248 case BINDER_WORK_TRANSACTION_COMPLETE:
5249 seq_printf(m, "%stransaction complete\n", prefix);
5250 break;
5251 case BINDER_WORK_NODE:
5252 node = container_of(w, struct binder_node, work);
5253 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5254 prefix, node->debug_id,
5255 (u64)node->ptr, (u64)node->cookie);
5256 break;
5257 case BINDER_WORK_DEAD_BINDER:
5258 seq_printf(m, "%shas dead binder\n", prefix);
5259 break;
5260 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5261 seq_printf(m, "%shas cleared dead binder\n", prefix);
5262 break;
5263 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5264 seq_printf(m, "%shas cleared death notification\n", prefix);
5265 break;
5266 default:
5267 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5268 break;
5269 }
5270 }
5271
5272 static void print_binder_thread_ilocked(struct seq_file *m,
5273 struct binder_thread *thread,
5274 int print_always)
5275 {
5276 struct binder_transaction *t;
5277 struct binder_work *w;
5278 size_t start_pos = m->count;
5279 size_t header_pos;
5280
5281 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
5282 thread->pid, thread->looper,
5283 thread->looper_need_return,
5284 atomic_read(&thread->tmp_ref));
5285 header_pos = m->count;
5286 t = thread->transaction_stack;
5287 while (t) {
5288 if (t->from == thread) {
5289 print_binder_transaction_ilocked(m, thread->proc,
5290 " outgoing transaction", t);
5291 t = t->from_parent;
5292 } else if (t->to_thread == thread) {
5293 print_binder_transaction_ilocked(m, thread->proc,
5294 " incoming transaction", t);
5295 t = t->to_parent;
5296 } else {
5297 print_binder_transaction_ilocked(m, thread->proc,
5298 " bad transaction", t);
5299 t = NULL;
5300 }
5301 }
5302 list_for_each_entry(w, &thread->todo, entry) {
5303 print_binder_work_ilocked(m, thread->proc, " ",
5304 " pending transaction", w);
5305 }
5306 if (!print_always && m->count == header_pos)
5307 m->count = start_pos;
5308 }
5309
5310 static void print_binder_node_nilocked(struct seq_file *m,
5311 struct binder_node *node)
5312 {
5313 struct binder_ref *ref;
5314 struct binder_work *w;
5315 int count;
5316
5317 count = 0;
5318 hlist_for_each_entry(ref, &node->refs, node_entry)
5319 count++;
5320
5321 seq_printf(m, " node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5322 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5323 node->sched_policy, node->min_priority,
5324 node->has_strong_ref, node->has_weak_ref,
5325 node->local_strong_refs, node->local_weak_refs,
5326 node->internal_strong_refs, count, node->tmp_refs);
5327 if (count) {
5328 seq_puts(m, " proc");
5329 hlist_for_each_entry(ref, &node->refs, node_entry)
5330 seq_printf(m, " %d", ref->proc->pid);
5331 }
5332 seq_puts(m, "\n");
5333 if (node->proc) {
5334 list_for_each_entry(w, &node->async_todo, entry)
5335 print_binder_work_ilocked(m, node->proc, " ",
5336 " pending async transaction", w);
5337 }
5338 }
5339
5340 static void print_binder_ref_olocked(struct seq_file *m,
5341 struct binder_ref *ref)
5342 {
5343 binder_node_lock(ref->node);
5344 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5345 ref->data.debug_id, ref->data.desc,
5346 ref->node->proc ? "" : "dead ",
5347 ref->node->debug_id, ref->data.strong,
5348 ref->data.weak, ref->death);
5349 binder_node_unlock(ref->node);
5350 }
5351
5352 static void print_binder_proc(struct seq_file *m,
5353 struct binder_proc *proc, int print_all)
5354 {
5355 struct binder_work *w;
5356 struct rb_node *n;
5357 size_t start_pos = m->count;
5358 size_t header_pos;
5359 struct binder_node *last_node = NULL;
5360
5361 seq_printf(m, "proc %d\n", proc->pid);
5362 seq_printf(m, "context %s\n", proc->context->name);
5363 header_pos = m->count;
5364
5365 binder_inner_proc_lock(proc);
5366 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5367 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5368 rb_node), print_all);
5369
5370 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5371 struct binder_node *node = rb_entry(n, struct binder_node,
5372 rb_node);
5373 /*
5374 * take a temporary reference on the node so it
5375 * survives and isn't removed from the tree
5376 * while we print it.
5377 */
5378 binder_inc_node_tmpref_ilocked(node);
5379 /* Need to drop inner lock to take node lock */
5380 binder_inner_proc_unlock(proc);
5381 if (last_node)
5382 binder_put_node(last_node);
5383 binder_node_inner_lock(node);
5384 print_binder_node_nilocked(m, node);
5385 binder_node_inner_unlock(node);
5386 last_node = node;
5387 binder_inner_proc_lock(proc);
5388 }
5389 binder_inner_proc_unlock(proc);
5390 if (last_node)
5391 binder_put_node(last_node);
5392
5393 if (print_all) {
5394 binder_proc_lock(proc);
5395 for (n = rb_first(&proc->refs_by_desc);
5396 n != NULL;
5397 n = rb_next(n))
5398 print_binder_ref_olocked(m, rb_entry(n,
5399 struct binder_ref,
5400 rb_node_desc));
5401 binder_proc_unlock(proc);
5402 }
5403 binder_alloc_print_allocated(m, &proc->alloc);
5404 binder_inner_proc_lock(proc);
5405 list_for_each_entry(w, &proc->todo, entry)
5406 print_binder_work_ilocked(m, proc, " ",
5407 " pending transaction", w);
5408 list_for_each_entry(w, &proc->delivered_death, entry) {
5409 seq_puts(m, " has delivered dead binder\n");
5410 break;
5411 }
5412 binder_inner_proc_unlock(proc);
5413 if (!print_all && m->count == header_pos)
5414 m->count = start_pos;
5415 }
5416
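/*
 * Human-readable names for the BR_*/BC_* protocol codes and for the
 * object types counted in binder_stats. The order of each table must
 * match the corresponding enum; print_binder_stats() enforces the
 * sizes with BUILD_BUG_ON().
 */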
5417 static const char * const binder_return_strings[] = {
5418 "BR_ERROR",
5419 "BR_OK",
5420 "BR_TRANSACTION",
5421 "BR_REPLY",
5422 "BR_ACQUIRE_RESULT",
5423 "BR_DEAD_REPLY",
5424 "BR_TRANSACTION_COMPLETE",
5425 "BR_INCREFS",
5426 "BR_ACQUIRE",
5427 "BR_RELEASE",
5428 "BR_DECREFS",
5429 "BR_ATTEMPT_ACQUIRE",
5430 "BR_NOOP",
5431 "BR_SPAWN_LOOPER",
5432 "BR_FINISHED",
5433 "BR_DEAD_BINDER",
5434 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5435 "BR_FAILED_REPLY"
5436 };
5437
5438 static const char * const binder_command_strings[] = {
5439 "BC_TRANSACTION",
5440 "BC_REPLY",
5441 "BC_ACQUIRE_RESULT",
5442 "BC_FREE_BUFFER",
5443 "BC_INCREFS",
5444 "BC_ACQUIRE",
5445 "BC_RELEASE",
5446 "BC_DECREFS",
5447 "BC_INCREFS_DONE",
5448 "BC_ACQUIRE_DONE",
5449 "BC_ATTEMPT_ACQUIRE",
5450 "BC_REGISTER_LOOPER",
5451 "BC_ENTER_LOOPER",
5452 "BC_EXIT_LOOPER",
5453 "BC_REQUEST_DEATH_NOTIFICATION",
5454 "BC_CLEAR_DEATH_NOTIFICATION",
5455 "BC_DEAD_BINDER_DONE",
5456 "BC_TRANSACTION_SG",
5457 "BC_REPLY_SG",
5458 };
5459
5460 static const char * const binder_objstat_strings[] = {
5461 "proc",
5462 "thread",
5463 "node",
5464 "ref",
5465 "death",
5466 "transaction",
5467 "transaction_complete"
5468 };
5469
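/*
 * Print the non-zero command (BC_*), return (BR_*) and object counters
 * from a binder_stats block, one "<prefix><name>: <value>" line each;
 * object counters additionally show active (created - deleted) versus
 * total created.
 */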
5470 static void print_binder_stats(struct seq_file *m, const char *prefix,
5471 struct binder_stats *stats)
5472 {
5473 int i;
5474
5475 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5476 ARRAY_SIZE(binder_command_strings));
5477 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5478 int temp = atomic_read(&stats->bc[i]);
5479
5480 if (temp)
5481 seq_printf(m, "%s%s: %d\n", prefix,
5482 binder_command_strings[i], temp);
5483 }
5484
5485 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5486 ARRAY_SIZE(binder_return_strings));
5487 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5488 int temp = atomic_read(&stats->br[i]);
5489
5490 if (temp)
5491 seq_printf(m, "%s%s: %d\n", prefix,
5492 binder_return_strings[i], temp);
5493 }
5494
5495 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5496 ARRAY_SIZE(binder_objstat_strings));
5497 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5498 ARRAY_SIZE(stats->obj_deleted));
5499 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5500 int created = atomic_read(&stats->obj_created[i]);
5501 int deleted = atomic_read(&stats->obj_deleted[i]);
5502
5503 if (created || deleted)
5504 seq_printf(m, "%s%s: active %d total %d\n",
5505 prefix,
5506 binder_objstat_strings[i],
5507 created - deleted,
5508 created);
5509 }
5510 }
5511
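/*
 * Per-process summary for the "stats" file: thread and ready-thread
 * counts, free async space, node/ref/buffer counts, the number of
 * transactions pending on proc->todo, and finally the per-process
 * counters via print_binder_stats().
 */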
5512 static void print_binder_proc_stats(struct seq_file *m,
5513 struct binder_proc *proc)
5514 {
5515 struct binder_work *w;
5516 struct binder_thread *thread;
5517 struct rb_node *n;
5518 int count, strong, weak, ready_threads;
5519 size_t free_async_space =
5520 binder_alloc_get_free_async_space(&proc->alloc);
5521
5522 seq_printf(m, "proc %d\n", proc->pid);
5523 seq_printf(m, "context %s\n", proc->context->name);
5524 count = 0;
5525 ready_threads = 0;
5526 binder_inner_proc_lock(proc);
5527 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5528 count++;
5529
5530 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5531 ready_threads++;
5532
5533 seq_printf(m, " threads: %d\n", count);
5534 seq_printf(m, " requested threads: %d+%d/%d\n"
5535 " ready threads %d\n"
5536 " free async space %zd\n", proc->requested_threads,
5537 proc->requested_threads_started, proc->max_threads,
5538 ready_threads,
5539 free_async_space);
5540 count = 0;
5541 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5542 count++;
5543 binder_inner_proc_unlock(proc);
5544 seq_printf(m, " nodes: %d\n", count);
5545 count = 0;
5546 strong = 0;
5547 weak = 0;
5548 binder_proc_lock(proc);
5549 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5550 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5551 rb_node_desc);
5552 count++;
5553 strong += ref->data.strong;
5554 weak += ref->data.weak;
5555 }
5556 binder_proc_unlock(proc);
5557 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
5558
5559 count = binder_alloc_get_allocated_count(&proc->alloc);
5560 seq_printf(m, " buffers: %d\n", count);
5561
5562 count = 0;
5563 binder_inner_proc_lock(proc);
5564 list_for_each_entry(w, &proc->todo, entry) {
5565 if (w->type == BINDER_WORK_TRANSACTION)
5566 count++;
5567 }
5568 binder_inner_proc_unlock(proc);
5569 seq_printf(m, " pending transactions: %d\n", count);
5570
5571 print_binder_stats(m, " ", &proc->stats);
5572 }
5573
5574
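/*
 * seq_file show routine for the debugfs "state" file (typically
 * /sys/kernel/debug/binder/state). Dead nodes are printed first, each
 * pinned with a temporary reference so binder_dead_nodes_lock can be
 * dropped while it is dumped, followed by the full state of every
 * process on binder_procs.
 */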
5575 static int binder_state_show(struct seq_file *m, void *unused)
5576 {
5577 struct binder_proc *proc;
5578 struct binder_node *node;
5579 struct binder_node *last_node = NULL;
5580
5581 seq_puts(m, "binder state:\n");
5582
5583 spin_lock(&binder_dead_nodes_lock);
5584 if (!hlist_empty(&binder_dead_nodes))
5585 seq_puts(m, "dead nodes:\n");
5586 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5587 /*
5588 * take a temporary reference on the node so it
5589 * survives and isn't removed from the list
5590 * while we print it.
5591 */
5592 node->tmp_refs++;
5593 spin_unlock(&binder_dead_nodes_lock);
5594 if (last_node)
5595 binder_put_node(last_node);
5596 binder_node_lock(node);
5597 print_binder_node_nilocked(m, node);
5598 binder_node_unlock(node);
5599 last_node = node;
5600 spin_lock(&binder_dead_nodes_lock);
5601 }
5602 spin_unlock(&binder_dead_nodes_lock);
5603 if (last_node)
5604 binder_put_node(last_node);
5605
5606 mutex_lock(&binder_procs_lock);
5607 hlist_for_each_entry(proc, &binder_procs, proc_node)
5608 print_binder_proc(m, proc, 1);
5609 mutex_unlock(&binder_procs_lock);
5610
5611 return 0;
5612 }
5613
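/*
 * Show routine for the debugfs "stats" file: the global counters,
 * then a summary for every registered process.
 */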
5614 static int binder_stats_show(struct seq_file *m, void *unused)
5615 {
5616 struct binder_proc *proc;
5617
5618 seq_puts(m, "binder stats:\n");
5619
5620 print_binder_stats(m, "", &binder_stats);
5621
5622 mutex_lock(&binder_procs_lock);
5623 hlist_for_each_entry(proc, &binder_procs, proc_node)
5624 print_binder_proc_stats(m, proc);
5625 mutex_unlock(&binder_procs_lock);
5626
5627 return 0;
5628 }
5629
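/*
 * Show routine for the debugfs "transactions" file: like "state" but
 * with print_all == 0, so refs are skipped and processes with nothing
 * pending produce no output.
 */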
5630 static int binder_transactions_show(struct seq_file *m, void *unused)
5631 {
5632 struct binder_proc *proc;
5633
5634 seq_puts(m, "binder transactions:\n");
5635 mutex_lock(&binder_procs_lock);
5636 hlist_for_each_entry(proc, &binder_procs, proc_node)
5637 print_binder_proc(m, proc, 0);
5638 mutex_unlock(&binder_procs_lock);
5639
5640 return 0;
5641 }
5642
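/*
 * Show routine for the per-pid files under the debugfs "proc"
 * directory. m->private carries the pid the file was created for;
 * every binder_proc with a matching pid is dumped in full.
 */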
5643 static int binder_proc_show(struct seq_file *m, void *unused)
5644 {
5645 struct binder_proc *itr;
5646 int pid = (unsigned long)m->private;
5647
5648 mutex_lock(&binder_procs_lock);
5649 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5650 if (itr->pid == pid) {
5651 seq_puts(m, "binder proc state:\n");
5652 print_binder_proc(m, itr, 1);
5653 }
5654 }
5655 mutex_unlock(&binder_procs_lock);
5656
5657 return 0;
5658 }
5659
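/*
 * Format one transaction log entry. debug_id_done is sampled before
 * and after the fields are printed; if it is still zero or has changed
 * the entry was being updated concurrently and the line is marked
 * " (incomplete)". The smp_rmb() calls order those reads against the
 * field accesses; the logging side is expected to provide the matching
 * write barriers.
 */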
5660 static void print_binder_transaction_log_entry(struct seq_file *m,
5661 struct binder_transaction_log_entry *e)
5662 {
5663 int debug_id = READ_ONCE(e->debug_id_done);
5664 /*
5665 * read barrier to guarantee debug_id_done read before
5666 * we print the log values
5667 */
5668 smp_rmb();
5669 seq_printf(m,
5670 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5671 e->debug_id, (e->call_type == 2) ? "reply" :
5672 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5673 e->from_thread, e->to_proc, e->to_thread, e->context_name,
5674 e->to_node, e->target_handle, e->data_size, e->offsets_size,
5675 e->return_error, e->return_error_param,
5676 e->return_error_line);
5677 /*
5678	 * read barrier to guarantee that debug_id_done is read after
5679	 * the fields of the entry have been printed
5680 */
5681 smp_rmb();
5682 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5683 "\n" : " (incomplete)\n");
5684 }
5685
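/*
 * Show routine for the "transaction_log" and "failed_transaction_log"
 * files. log->cur is a free-running index into a fixed-size ring, so
 * the oldest valid slot is computed first and the entries are printed
 * in order, wrapping modulo ARRAY_SIZE(log->entry).
 */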
5686 static int binder_transaction_log_show(struct seq_file *m, void *unused)
5687 {
5688 struct binder_transaction_log *log = m->private;
5689 unsigned int log_cur = atomic_read(&log->cur);
5690 unsigned int count;
5691 unsigned int cur;
5692 int i;
5693
5694 count = log_cur + 1;
5695 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5696 0 : count % ARRAY_SIZE(log->entry);
5697 if (count > ARRAY_SIZE(log->entry) || log->full)
5698 count = ARRAY_SIZE(log->entry);
5699 for (i = 0; i < count; i++) {
5700 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5701
5702 print_binder_transaction_log_entry(m, &log->entry[index]);
5703 }
5704 return 0;
5705 }
5706
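/* File operations for the binder character device(s) registered below. */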
5707 static const struct file_operations binder_fops = {
5708 .owner = THIS_MODULE,
5709 .poll = binder_poll,
5710 .unlocked_ioctl = binder_ioctl,
5711 .compat_ioctl = binder_ioctl,
5712 .mmap = binder_mmap,
5713 .open = binder_open,
5714 .flush = binder_flush,
5715 .release = binder_release,
5716 };
5717
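/*
 * Each BINDER_DEBUG_ENTRY() invocation expands to the seq_file
 * boilerplate (an open helper plus binder_*_fops) used when the
 * debugfs files are created in binder_init().
 */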
5718 BINDER_DEBUG_ENTRY(state);
5719 BINDER_DEBUG_ENTRY(stats);
5720 BINDER_DEBUG_ENTRY(transactions);
5721 BINDER_DEBUG_ENTRY(transaction_log);
5722
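/*
 * Allocate and register one binder misc device (e.g. /dev/binder).
 * Each device carries its own binder_context with no context manager
 * set yet; successfully registered devices are added to the global
 * binder_devices list so binder_init() can unwind them on failure.
 */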
5723 static int __init init_binder_device(const char *name)
5724 {
5725 int ret;
5726 struct binder_device *binder_device;
5727
5728 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5729 if (!binder_device)
5730 return -ENOMEM;
5731
5732 binder_device->miscdev.fops = &binder_fops;
5733 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5734 binder_device->miscdev.name = name;
5735
5736 binder_device->context.binder_context_mgr_uid = INVALID_UID;
5737 binder_device->context.name = name;
5738 mutex_init(&binder_device->context.context_mgr_node_lock);
5739
5740 ret = misc_register(&binder_device->miscdev);
5741 if (ret < 0) {
5742 kfree(binder_device);
5743 return ret;
5744 }
5745
5746 hlist_add_head(&binder_device->hlist, &binder_devices);
5747
5748 return ret;
5749 }
5750
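/*
 * Module init: reset the transaction logs, create the deferred-work
 * workqueue, set up the debugfs hierarchy (best effort; the driver
 * still works if debugfs is unavailable), then register one misc
 * device for every comma-separated name in binder_devices_param. If
 * any registration fails, all previously registered devices are torn
 * down before the error is returned.
 */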
5751 static int __init binder_init(void)
5752 {
5753 int ret;
5754 char *device_name, *device_names;
5755 struct binder_device *device;
5756 struct hlist_node *tmp;
5757
5758 atomic_set(&binder_transaction_log.cur, ~0U);
5759 atomic_set(&binder_transaction_log_failed.cur, ~0U);
5760 binder_deferred_workqueue = create_singlethread_workqueue("binder");
5761 if (!binder_deferred_workqueue)
5762 return -ENOMEM;
5763
5764 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5765 if (binder_debugfs_dir_entry_root)
5766 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5767 binder_debugfs_dir_entry_root);
5768
5769 if (binder_debugfs_dir_entry_root) {
5770 debugfs_create_file("state",
5771 S_IRUGO,
5772 binder_debugfs_dir_entry_root,
5773 NULL,
5774 &binder_state_fops);
5775 debugfs_create_file("stats",
5776 S_IRUGO,
5777 binder_debugfs_dir_entry_root,
5778 NULL,
5779 &binder_stats_fops);
5780 debugfs_create_file("transactions",
5781 S_IRUGO,
5782 binder_debugfs_dir_entry_root,
5783 NULL,
5784 &binder_transactions_fops);
5785 debugfs_create_file("transaction_log",
5786 S_IRUGO,
5787 binder_debugfs_dir_entry_root,
5788 &binder_transaction_log,
5789 &binder_transaction_log_fops);
5790 debugfs_create_file("failed_transaction_log",
5791 S_IRUGO,
5792 binder_debugfs_dir_entry_root,
5793 &binder_transaction_log_failed,
5794 &binder_transaction_log_fops);
5795 }
5796
5797 /*
5798	 * Copy the module parameter string, because we don't want to
5799 * tokenize it in-place.
5800 */
5801 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
5802 if (!device_names) {
5803 ret = -ENOMEM;
5804 goto err_alloc_device_names_failed;
5805 }
5806 strcpy(device_names, binder_devices_param);
5807
5808 while ((device_name = strsep(&device_names, ","))) {
5809 ret = init_binder_device(device_name);
5810 if (ret)
5811 goto err_init_binder_device_failed;
5812 }
5813
5814 return ret;
5815
5816 err_init_binder_device_failed:
5817 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
5818 misc_deregister(&device->miscdev);
5819 hlist_del(&device->hlist);
5820 kfree(device);
5821 }
5822 err_alloc_device_names_failed:
5823 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
5824
5825 destroy_workqueue(binder_deferred_workqueue);
5826
5827 return ret;
5828 }
5829
5830 device_initcall(binder_init);
5831
5832 #define CREATE_TRACE_POINTS
5833 #include "binder_trace.h"
5834
5835 MODULE_LICENSE("GPL v2");