ANDROID: binder: remove WARN() for redundant txn error
[GitHub/moto-9609/android_kernel_motorola_exynos9610.git] / drivers / android / binder.c
1 /* binder.c
2 *
3 * Android IPC Subsystem
4 *
5 * Copyright (C) 2007-2008 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18 /*
19 * Locking overview
20 *
21 * There are 3 main spinlocks which must be acquired in the
22 * order shown:
23 *
24 * 1) proc->outer_lock : protects binder_ref
25 * binder_proc_lock() and binder_proc_unlock() are
26 * used to acq/rel.
27 * 2) node->lock : protects most fields of binder_node.
28 * binder_node_lock() and binder_node_unlock() are
29 * used to acq/rel
30 * 3) proc->inner_lock : protects the thread and node lists
31 * (proc->threads, proc->waiting_threads, proc->nodes)
32 * and all todo lists associated with the binder_proc
33 * (proc->todo, thread->todo, proc->delivered_death and
34 * node->async_todo), as well as thread->transaction_stack
35 * binder_inner_proc_lock() and binder_inner_proc_unlock()
36 * are used to acq/rel
37 *
38 * Any lock under procA must never be nested under any lock at the same
39 * level or below on procB.
40 *
41 * Functions that require a lock held on entry indicate which lock
42 * in the suffix of the function name:
43 *
44 * foo_olocked() : requires node->outer_lock
45 * foo_nlocked() : requires node->lock
46 * foo_ilocked() : requires proc->inner_lock
47 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
48 * foo_nilocked(): requires node->lock and proc->inner_lock
49 * ...
50 */
51
52 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
53
54 #include <asm/cacheflush.h>
55 #include <linux/fdtable.h>
56 #include <linux/file.h>
57 #include <linux/freezer.h>
58 #include <linux/fs.h>
59 #include <linux/list.h>
60 #include <linux/miscdevice.h>
61 #include <linux/module.h>
62 #include <linux/mutex.h>
63 #include <linux/nsproxy.h>
64 #include <linux/poll.h>
65 #include <linux/debugfs.h>
66 #include <linux/rbtree.h>
67 #include <linux/sched/signal.h>
68 #include <linux/sched/mm.h>
69 #include <linux/seq_file.h>
70 #include <linux/uaccess.h>
71 #include <linux/pid_namespace.h>
72 #include <linux/security.h>
73 #include <linux/spinlock.h>
74
75 #ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
76 #define BINDER_IPC_32BIT 1
77 #endif
78
79 #include <uapi/linux/android/binder.h>
80 #include "binder_alloc.h"
81 #include "binder_trace.h"
82
83 static HLIST_HEAD(binder_deferred_list);
84 static DEFINE_MUTEX(binder_deferred_lock);
85
86 static HLIST_HEAD(binder_devices);
87 static HLIST_HEAD(binder_procs);
88 static DEFINE_MUTEX(binder_procs_lock);
89
90 static HLIST_HEAD(binder_dead_nodes);
91 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
92
93 static struct dentry *binder_debugfs_dir_entry_root;
94 static struct dentry *binder_debugfs_dir_entry_proc;
95 static atomic_t binder_last_id;
96
97 #define BINDER_DEBUG_ENTRY(name) \
98 static int binder_##name##_open(struct inode *inode, struct file *file) \
99 { \
100 return single_open(file, binder_##name##_show, inode->i_private); \
101 } \
102 \
103 static const struct file_operations binder_##name##_fops = { \
104 .owner = THIS_MODULE, \
105 .open = binder_##name##_open, \
106 .read = seq_read, \
107 .llseek = seq_lseek, \
108 .release = single_release, \
109 }
110
111 static int binder_proc_show(struct seq_file *m, void *unused);
112 BINDER_DEBUG_ENTRY(proc);
113
114 /* This is only defined in include/asm-arm/sizes.h */
115 #ifndef SZ_1K
116 #define SZ_1K 0x400
117 #endif
118
119 #ifndef SZ_4M
120 #define SZ_4M 0x400000
121 #endif
122
123 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
124
125 enum {
126 BINDER_DEBUG_USER_ERROR = 1U << 0,
127 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
128 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
129 BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
130 BINDER_DEBUG_DEAD_BINDER = 1U << 4,
131 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
132 BINDER_DEBUG_READ_WRITE = 1U << 6,
133 BINDER_DEBUG_USER_REFS = 1U << 7,
134 BINDER_DEBUG_THREADS = 1U << 8,
135 BINDER_DEBUG_TRANSACTION = 1U << 9,
136 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
137 BINDER_DEBUG_FREE_BUFFER = 1U << 11,
138 BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
139 BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
140 BINDER_DEBUG_SPINLOCKS = 1U << 14,
141 };
142 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
143 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
144 module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
145
146 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
147 module_param_named(devices, binder_devices_param, charp, 0444);
148
149 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
150 static int binder_stop_on_user_error;
151
152 static int binder_set_stop_on_user_error(const char *val,
153 struct kernel_param *kp)
154 {
155 int ret;
156
157 ret = param_set_int(val, kp);
158 if (binder_stop_on_user_error < 2)
159 wake_up(&binder_user_error_wait);
160 return ret;
161 }
162 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
163 param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
164
165 #define binder_debug(mask, x...) \
166 do { \
167 if (binder_debug_mask & mask) \
168 pr_info(x); \
169 } while (0)
170
171 #define binder_user_error(x...) \
172 do { \
173 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
174 pr_info(x); \
175 if (binder_stop_on_user_error) \
176 binder_stop_on_user_error = 2; \
177 } while (0)
178
179 #define to_flat_binder_object(hdr) \
180 container_of(hdr, struct flat_binder_object, hdr)
181
182 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
183
184 #define to_binder_buffer_object(hdr) \
185 container_of(hdr, struct binder_buffer_object, hdr)
186
187 #define to_binder_fd_array_object(hdr) \
188 container_of(hdr, struct binder_fd_array_object, hdr)
189
190 enum binder_stat_types {
191 BINDER_STAT_PROC,
192 BINDER_STAT_THREAD,
193 BINDER_STAT_NODE,
194 BINDER_STAT_REF,
195 BINDER_STAT_DEATH,
196 BINDER_STAT_TRANSACTION,
197 BINDER_STAT_TRANSACTION_COMPLETE,
198 BINDER_STAT_COUNT
199 };
200
201 struct binder_stats {
202 atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
203 atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
204 atomic_t obj_created[BINDER_STAT_COUNT];
205 atomic_t obj_deleted[BINDER_STAT_COUNT];
206 };
207
208 static struct binder_stats binder_stats;
209
210 static inline void binder_stats_deleted(enum binder_stat_types type)
211 {
212 atomic_inc(&binder_stats.obj_deleted[type]);
213 }
214
215 static inline void binder_stats_created(enum binder_stat_types type)
216 {
217 atomic_inc(&binder_stats.obj_created[type]);
218 }
219
220 struct binder_transaction_log_entry {
221 int debug_id;
222 int debug_id_done;
223 int call_type;
224 int from_proc;
225 int from_thread;
226 int target_handle;
227 int to_proc;
228 int to_thread;
229 int to_node;
230 int data_size;
231 int offsets_size;
232 int return_error_line;
233 uint32_t return_error;
234 uint32_t return_error_param;
235 const char *context_name;
236 };
237 struct binder_transaction_log {
238 atomic_t cur;
239 bool full;
240 struct binder_transaction_log_entry entry[32];
241 };
242 static struct binder_transaction_log binder_transaction_log;
243 static struct binder_transaction_log binder_transaction_log_failed;
244
245 static struct binder_transaction_log_entry *binder_transaction_log_add(
246 struct binder_transaction_log *log)
247 {
248 struct binder_transaction_log_entry *e;
249 unsigned int cur = atomic_inc_return(&log->cur);
250
251 if (cur >= ARRAY_SIZE(log->entry))
252 log->full = 1;
253 e = &log->entry[cur % ARRAY_SIZE(log->entry)];
254 WRITE_ONCE(e->debug_id_done, 0);
255 /*
256 * write-barrier to synchronize access to e->debug_id_done.
257 * We make sure the initialized 0 value is seen before
258 * memset() other fields are zeroed by memset.
259 */
260 smp_wmb();
261 memset(e, 0, sizeof(*e));
262 return e;
263 }
264
265 struct binder_context {
266 struct binder_node *binder_context_mgr_node;
267 struct mutex context_mgr_node_lock;
268
269 kuid_t binder_context_mgr_uid;
270 const char *name;
271 };
272
273 struct binder_device {
274 struct hlist_node hlist;
275 struct miscdevice miscdev;
276 struct binder_context context;
277 };
278
279 /**
280 * struct binder_work - work enqueued on a worklist
281 * @entry: node enqueued on list
282 * @type: type of work to be performed
283 *
284 * There are separate work lists for proc, thread, and node (async).
285 */
286 struct binder_work {
287 struct list_head entry;
288
289 enum {
290 BINDER_WORK_TRANSACTION = 1,
291 BINDER_WORK_TRANSACTION_COMPLETE,
292 BINDER_WORK_RETURN_ERROR,
293 BINDER_WORK_NODE,
294 BINDER_WORK_DEAD_BINDER,
295 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
296 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
297 } type;
298 };
299
300 struct binder_error {
301 struct binder_work work;
302 uint32_t cmd;
303 };
304
305 /**
306 * struct binder_node - binder node bookkeeping
307 * @debug_id: unique ID for debugging
308 * (invariant after initialized)
309 * @lock: lock for node fields
310 * @work: worklist element for node work
311 * (protected by @proc->inner_lock)
312 * @rb_node: element for proc->nodes tree
313 * (protected by @proc->inner_lock)
314 * @dead_node: element for binder_dead_nodes list
315 * (protected by binder_dead_nodes_lock)
316 * @proc: binder_proc that owns this node
317 * (invariant after initialized)
318 * @refs: list of references on this node
319 * (protected by @lock)
320 * @internal_strong_refs: used to take strong references when
321 * initiating a transaction
322 * (protected by @proc->inner_lock if @proc
323 * and by @lock)
324 * @local_weak_refs: weak user refs from local process
325 * (protected by @proc->inner_lock if @proc
326 * and by @lock)
327 * @local_strong_refs: strong user refs from local process
328 * (protected by @proc->inner_lock if @proc
329 * and by @lock)
330 * @tmp_refs: temporary kernel refs
331 * (protected by @proc->inner_lock while @proc
332 * is valid, and by binder_dead_nodes_lock
333 * if @proc is NULL. During inc/dec and node release
334 * it is also protected by @lock to provide safety
335 * as the node dies and @proc becomes NULL)
336 * @ptr: userspace pointer for node
337 * (invariant, no lock needed)
338 * @cookie: userspace cookie for node
339 * (invariant, no lock needed)
340 * @has_strong_ref: userspace notified of strong ref
341 * (protected by @proc->inner_lock if @proc
342 * and by @lock)
343 * @pending_strong_ref: userspace has acked notification of strong ref
344 * (protected by @proc->inner_lock if @proc
345 * and by @lock)
346 * @has_weak_ref: userspace notified of weak ref
347 * (protected by @proc->inner_lock if @proc
348 * and by @lock)
349 * @pending_weak_ref: userspace has acked notification of weak ref
350 * (protected by @proc->inner_lock if @proc
351 * and by @lock)
352 * @has_async_transaction: async transaction to node in progress
353 * (protected by @lock)
354 * @accept_fds: file descriptor operations supported for node
355 * (invariant after initialized)
356 * @min_priority: minimum scheduling priority
357 * (invariant after initialized)
358 * @async_todo: list of async work items
359 * (protected by @proc->inner_lock)
360 *
361 * Bookkeeping structure for binder nodes.
362 */
363 struct binder_node {
364 int debug_id;
365 spinlock_t lock;
366 struct binder_work work;
367 union {
368 struct rb_node rb_node;
369 struct hlist_node dead_node;
370 };
371 struct binder_proc *proc;
372 struct hlist_head refs;
373 int internal_strong_refs;
374 int local_weak_refs;
375 int local_strong_refs;
376 int tmp_refs;
377 binder_uintptr_t ptr;
378 binder_uintptr_t cookie;
379 struct {
380 /*
381 * bitfield elements protected by
382 * proc inner_lock
383 */
384 u8 has_strong_ref:1;
385 u8 pending_strong_ref:1;
386 u8 has_weak_ref:1;
387 u8 pending_weak_ref:1;
388 };
389 struct {
390 /*
391 * invariant after initialization
392 */
393 u8 accept_fds:1;
394 u8 min_priority;
395 };
396 bool has_async_transaction;
397 struct list_head async_todo;
398 };
399
400 struct binder_ref_death {
401 /**
402 * @work: worklist element for death notifications
403 * (protected by inner_lock of the proc that
404 * this ref belongs to)
405 */
406 struct binder_work work;
407 binder_uintptr_t cookie;
408 };
409
410 /**
411 * struct binder_ref_data - binder_ref counts and id
412 * @debug_id: unique ID for the ref
413 * @desc: unique userspace handle for ref
414 * @strong: strong ref count (debugging only if not locked)
415 * @weak: weak ref count (debugging only if not locked)
416 *
417 * Structure to hold ref count and ref id information. Since
418 * the actual ref can only be accessed with a lock, this structure
419 * is used to return information about the ref to callers of
420 * ref inc/dec functions.
421 */
422 struct binder_ref_data {
423 int debug_id;
424 uint32_t desc;
425 int strong;
426 int weak;
427 };
428
429 /**
430 * struct binder_ref - struct to track references on nodes
431 * @data: binder_ref_data containing id, handle, and current refcounts
432 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
433 * @rb_node_node: node for lookup by @node in proc's rb_tree
434 * @node_entry: list entry for node->refs list in target node
435 * (protected by @node->lock)
436 * @proc: binder_proc containing ref
437 * @node: binder_node of target node. When cleaning up a
438 * ref for deletion in binder_cleanup_ref, a non-NULL
439 * @node indicates the node must be freed
440 * @death: pointer to death notification (ref_death) if requested
441 * (protected by @node->lock)
442 *
443 * Structure to track references from procA to target node (on procB). This
444 * structure is unsafe to access without holding @proc->outer_lock.
445 */
446 struct binder_ref {
447 /* Lookups needed: */
448 /* node + proc => ref (transaction) */
449 /* desc + proc => ref (transaction, inc/dec ref) */
450 /* node => refs + procs (proc exit) */
451 struct binder_ref_data data;
452 struct rb_node rb_node_desc;
453 struct rb_node rb_node_node;
454 struct hlist_node node_entry;
455 struct binder_proc *proc;
456 struct binder_node *node;
457 struct binder_ref_death *death;
458 };
459
460 enum binder_deferred_state {
461 BINDER_DEFERRED_PUT_FILES = 0x01,
462 BINDER_DEFERRED_FLUSH = 0x02,
463 BINDER_DEFERRED_RELEASE = 0x04,
464 };
465
466 /**
467 * struct binder_proc - binder process bookkeeping
468 * @proc_node: element for binder_procs list
469 * @threads: rbtree of binder_threads in this proc
470 * (protected by @inner_lock)
471 * @nodes: rbtree of binder nodes associated with
472 * this proc ordered by node->ptr
473 * (protected by @inner_lock)
474 * @refs_by_desc: rbtree of refs ordered by ref->desc
475 * (protected by @outer_lock)
476 * @refs_by_node: rbtree of refs ordered by ref->node
477 * (protected by @outer_lock)
478 * @waiting_threads: threads currently waiting for proc work
479 * (protected by @inner_lock)
480 * @pid PID of group_leader of process
481 * (invariant after initialized)
482 * @tsk task_struct for group_leader of process
483 * (invariant after initialized)
484 * @files files_struct for process
485 * (protected by @files_lock)
486 * @files_lock mutex to protect @files
487 * @deferred_work_node: element for binder_deferred_list
488 * (protected by binder_deferred_lock)
489 * @deferred_work: bitmap of deferred work to perform
490 * (protected by binder_deferred_lock)
491 * @is_dead: process is dead and awaiting free
492 * when outstanding transactions are cleaned up
493 * (protected by @inner_lock)
494 * @todo: list of work for this process
495 * (protected by @inner_lock)
496 * @wait: wait queue head to wait for proc work
497 * (invariant after initialized)
498 * @stats: per-process binder statistics
499 * (atomics, no lock needed)
500 * @delivered_death: list of delivered death notification
501 * (protected by @inner_lock)
502 * @max_threads: cap on number of binder threads
503 * (protected by @inner_lock)
504 * @requested_threads: number of binder threads requested but not
505 * yet started. In current implementation, can
506 * only be 0 or 1.
507 * (protected by @inner_lock)
508 * @requested_threads_started: number binder threads started
509 * (protected by @inner_lock)
510 * @tmp_ref: temporary reference to indicate proc is in use
511 * (protected by @inner_lock)
512 * @default_priority: default scheduler priority
513 * (invariant after initialized)
514 * @debugfs_entry: debugfs node
515 * @alloc: binder allocator bookkeeping
516 * @context: binder_context for this proc
517 * (invariant after initialized)
518 * @inner_lock: can nest under outer_lock and/or node lock
519 * @outer_lock: no nesting under innor or node lock
520 * Lock order: 1) outer, 2) node, 3) inner
521 *
522 * Bookkeeping structure for binder processes
523 */
524 struct binder_proc {
525 struct hlist_node proc_node;
526 struct rb_root threads;
527 struct rb_root nodes;
528 struct rb_root refs_by_desc;
529 struct rb_root refs_by_node;
530 struct list_head waiting_threads;
531 int pid;
532 struct task_struct *tsk;
533 struct files_struct *files;
534 struct mutex files_lock;
535 struct hlist_node deferred_work_node;
536 int deferred_work;
537 bool is_dead;
538
539 struct list_head todo;
540 wait_queue_head_t wait;
541 struct binder_stats stats;
542 struct list_head delivered_death;
543 int max_threads;
544 int requested_threads;
545 int requested_threads_started;
546 int tmp_ref;
547 long default_priority;
548 struct dentry *debugfs_entry;
549 struct binder_alloc alloc;
550 struct binder_context *context;
551 spinlock_t inner_lock;
552 spinlock_t outer_lock;
553 };
554
555 enum {
556 BINDER_LOOPER_STATE_REGISTERED = 0x01,
557 BINDER_LOOPER_STATE_ENTERED = 0x02,
558 BINDER_LOOPER_STATE_EXITED = 0x04,
559 BINDER_LOOPER_STATE_INVALID = 0x08,
560 BINDER_LOOPER_STATE_WAITING = 0x10,
561 BINDER_LOOPER_STATE_POLL = 0x20,
562 };
563
564 /**
565 * struct binder_thread - binder thread bookkeeping
566 * @proc: binder process for this thread
567 * (invariant after initialization)
568 * @rb_node: element for proc->threads rbtree
569 * (protected by @proc->inner_lock)
570 * @waiting_thread_node: element for @proc->waiting_threads list
571 * (protected by @proc->inner_lock)
572 * @pid: PID for this thread
573 * (invariant after initialization)
574 * @looper: bitmap of looping state
575 * (only accessed by this thread)
576 * @looper_needs_return: looping thread needs to exit driver
577 * (no lock needed)
578 * @transaction_stack: stack of in-progress transactions for this thread
579 * (protected by @proc->inner_lock)
580 * @todo: list of work to do for this thread
581 * (protected by @proc->inner_lock)
582 * @return_error: transaction errors reported by this thread
583 * (only accessed by this thread)
584 * @reply_error: transaction errors reported by target thread
585 * (protected by @proc->inner_lock)
586 * @wait: wait queue for thread work
587 * @stats: per-thread statistics
588 * (atomics, no lock needed)
589 * @tmp_ref: temporary reference to indicate thread is in use
590 * (atomic since @proc->inner_lock cannot
591 * always be acquired)
592 * @is_dead: thread is dead and awaiting free
593 * when outstanding transactions are cleaned up
594 * (protected by @proc->inner_lock)
595 *
596 * Bookkeeping structure for binder threads.
597 */
598 struct binder_thread {
599 struct binder_proc *proc;
600 struct rb_node rb_node;
601 struct list_head waiting_thread_node;
602 int pid;
603 int looper; /* only modified by this thread */
604 bool looper_need_return; /* can be written by other thread */
605 struct binder_transaction *transaction_stack;
606 struct list_head todo;
607 struct binder_error return_error;
608 struct binder_error reply_error;
609 wait_queue_head_t wait;
610 struct binder_stats stats;
611 atomic_t tmp_ref;
612 bool is_dead;
613 };
614
615 struct binder_transaction {
616 int debug_id;
617 struct binder_work work;
618 struct binder_thread *from;
619 struct binder_transaction *from_parent;
620 struct binder_proc *to_proc;
621 struct binder_thread *to_thread;
622 struct binder_transaction *to_parent;
623 unsigned need_reply:1;
624 /* unsigned is_dead:1; */ /* not used at the moment */
625
626 struct binder_buffer *buffer;
627 unsigned int code;
628 unsigned int flags;
629 long priority;
630 long saved_priority;
631 kuid_t sender_euid;
632 /**
633 * @lock: protects @from, @to_proc, and @to_thread
634 *
635 * @from, @to_proc, and @to_thread can be set to NULL
636 * during thread teardown
637 */
638 spinlock_t lock;
639 };
640
641 /**
642 * binder_proc_lock() - Acquire outer lock for given binder_proc
643 * @proc: struct binder_proc to acquire
644 *
645 * Acquires proc->outer_lock. Used to protect binder_ref
646 * structures associated with the given proc.
647 */
648 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
649 static void
650 _binder_proc_lock(struct binder_proc *proc, int line)
651 {
652 binder_debug(BINDER_DEBUG_SPINLOCKS,
653 "%s: line=%d\n", __func__, line);
654 spin_lock(&proc->outer_lock);
655 }
656
657 /**
658 * binder_proc_unlock() - Release spinlock for given binder_proc
659 * @proc: struct binder_proc to acquire
660 *
661 * Release lock acquired via binder_proc_lock()
662 */
663 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
664 static void
665 _binder_proc_unlock(struct binder_proc *proc, int line)
666 {
667 binder_debug(BINDER_DEBUG_SPINLOCKS,
668 "%s: line=%d\n", __func__, line);
669 spin_unlock(&proc->outer_lock);
670 }
671
672 /**
673 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
674 * @proc: struct binder_proc to acquire
675 *
676 * Acquires proc->inner_lock. Used to protect todo lists
677 */
678 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
679 static void
680 _binder_inner_proc_lock(struct binder_proc *proc, int line)
681 {
682 binder_debug(BINDER_DEBUG_SPINLOCKS,
683 "%s: line=%d\n", __func__, line);
684 spin_lock(&proc->inner_lock);
685 }
686
687 /**
688 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
689 * @proc: struct binder_proc to acquire
690 *
691 * Release lock acquired via binder_inner_proc_lock()
692 */
693 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
694 static void
695 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
696 {
697 binder_debug(BINDER_DEBUG_SPINLOCKS,
698 "%s: line=%d\n", __func__, line);
699 spin_unlock(&proc->inner_lock);
700 }
701
702 /**
703 * binder_node_lock() - Acquire spinlock for given binder_node
704 * @node: struct binder_node to acquire
705 *
706 * Acquires node->lock. Used to protect binder_node fields
707 */
708 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
709 static void
710 _binder_node_lock(struct binder_node *node, int line)
711 {
712 binder_debug(BINDER_DEBUG_SPINLOCKS,
713 "%s: line=%d\n", __func__, line);
714 spin_lock(&node->lock);
715 }
716
717 /**
718 * binder_node_unlock() - Release spinlock for given binder_proc
719 * @node: struct binder_node to acquire
720 *
721 * Release lock acquired via binder_node_lock()
722 */
723 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
724 static void
725 _binder_node_unlock(struct binder_node *node, int line)
726 {
727 binder_debug(BINDER_DEBUG_SPINLOCKS,
728 "%s: line=%d\n", __func__, line);
729 spin_unlock(&node->lock);
730 }
731
732 /**
733 * binder_node_inner_lock() - Acquire node and inner locks
734 * @node: struct binder_node to acquire
735 *
736 * Acquires node->lock. If node->proc also acquires
737 * proc->inner_lock. Used to protect binder_node fields
738 */
739 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
740 static void
741 _binder_node_inner_lock(struct binder_node *node, int line)
742 {
743 binder_debug(BINDER_DEBUG_SPINLOCKS,
744 "%s: line=%d\n", __func__, line);
745 spin_lock(&node->lock);
746 if (node->proc)
747 binder_inner_proc_lock(node->proc);
748 }
749
750 /**
751 * binder_node_unlock() - Release node and inner locks
752 * @node: struct binder_node to acquire
753 *
754 * Release lock acquired via binder_node_lock()
755 */
756 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
757 static void
758 _binder_node_inner_unlock(struct binder_node *node, int line)
759 {
760 struct binder_proc *proc = node->proc;
761
762 binder_debug(BINDER_DEBUG_SPINLOCKS,
763 "%s: line=%d\n", __func__, line);
764 if (proc)
765 binder_inner_proc_unlock(proc);
766 spin_unlock(&node->lock);
767 }
768
769 static bool binder_worklist_empty_ilocked(struct list_head *list)
770 {
771 return list_empty(list);
772 }
773
774 /**
775 * binder_worklist_empty() - Check if no items on the work list
776 * @proc: binder_proc associated with list
777 * @list: list to check
778 *
779 * Return: true if there are no items on list, else false
780 */
781 static bool binder_worklist_empty(struct binder_proc *proc,
782 struct list_head *list)
783 {
784 bool ret;
785
786 binder_inner_proc_lock(proc);
787 ret = binder_worklist_empty_ilocked(list);
788 binder_inner_proc_unlock(proc);
789 return ret;
790 }
791
792 static void
793 binder_enqueue_work_ilocked(struct binder_work *work,
794 struct list_head *target_list)
795 {
796 BUG_ON(target_list == NULL);
797 BUG_ON(work->entry.next && !list_empty(&work->entry));
798 list_add_tail(&work->entry, target_list);
799 }
800
801 /**
802 * binder_enqueue_work() - Add an item to the work list
803 * @proc: binder_proc associated with list
804 * @work: struct binder_work to add to list
805 * @target_list: list to add work to
806 *
807 * Adds the work to the specified list. Asserts that work
808 * is not already on a list.
809 */
810 static void
811 binder_enqueue_work(struct binder_proc *proc,
812 struct binder_work *work,
813 struct list_head *target_list)
814 {
815 binder_inner_proc_lock(proc);
816 binder_enqueue_work_ilocked(work, target_list);
817 binder_inner_proc_unlock(proc);
818 }
819
820 static void
821 binder_dequeue_work_ilocked(struct binder_work *work)
822 {
823 list_del_init(&work->entry);
824 }
825
826 /**
827 * binder_dequeue_work() - Removes an item from the work list
828 * @proc: binder_proc associated with list
829 * @work: struct binder_work to remove from list
830 *
831 * Removes the specified work item from whatever list it is on.
832 * Can safely be called if work is not on any list.
833 */
834 static void
835 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
836 {
837 binder_inner_proc_lock(proc);
838 binder_dequeue_work_ilocked(work);
839 binder_inner_proc_unlock(proc);
840 }
841
842 static struct binder_work *binder_dequeue_work_head_ilocked(
843 struct list_head *list)
844 {
845 struct binder_work *w;
846
847 w = list_first_entry_or_null(list, struct binder_work, entry);
848 if (w)
849 list_del_init(&w->entry);
850 return w;
851 }
852
853 /**
854 * binder_dequeue_work_head() - Dequeues the item at head of list
855 * @proc: binder_proc associated with list
856 * @list: list to dequeue head
857 *
858 * Removes the head of the list if there are items on the list
859 *
860 * Return: pointer dequeued binder_work, NULL if list was empty
861 */
862 static struct binder_work *binder_dequeue_work_head(
863 struct binder_proc *proc,
864 struct list_head *list)
865 {
866 struct binder_work *w;
867
868 binder_inner_proc_lock(proc);
869 w = binder_dequeue_work_head_ilocked(list);
870 binder_inner_proc_unlock(proc);
871 return w;
872 }
873
874 static void
875 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
876 static void binder_free_thread(struct binder_thread *thread);
877 static void binder_free_proc(struct binder_proc *proc);
878 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
879
880 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
881 {
882 unsigned long rlim_cur;
883 unsigned long irqs;
884 int ret;
885
886 mutex_lock(&proc->files_lock);
887 if (proc->files == NULL) {
888 ret = -ESRCH;
889 goto err;
890 }
891 if (!lock_task_sighand(proc->tsk, &irqs)) {
892 ret = -EMFILE;
893 goto err;
894 }
895 rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
896 unlock_task_sighand(proc->tsk, &irqs);
897
898 ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
899 err:
900 mutex_unlock(&proc->files_lock);
901 return ret;
902 }
903
904 /*
905 * copied from fd_install
906 */
907 static void task_fd_install(
908 struct binder_proc *proc, unsigned int fd, struct file *file)
909 {
910 mutex_lock(&proc->files_lock);
911 if (proc->files)
912 __fd_install(proc->files, fd, file);
913 mutex_unlock(&proc->files_lock);
914 }
915
916 /*
917 * copied from sys_close
918 */
919 static long task_close_fd(struct binder_proc *proc, unsigned int fd)
920 {
921 int retval;
922
923 mutex_lock(&proc->files_lock);
924 if (proc->files == NULL) {
925 retval = -ESRCH;
926 goto err;
927 }
928 retval = __close_fd(proc->files, fd);
929 /* can't restart close syscall because file table entry was cleared */
930 if (unlikely(retval == -ERESTARTSYS ||
931 retval == -ERESTARTNOINTR ||
932 retval == -ERESTARTNOHAND ||
933 retval == -ERESTART_RESTARTBLOCK))
934 retval = -EINTR;
935 err:
936 mutex_unlock(&proc->files_lock);
937 return retval;
938 }
939
940 static bool binder_has_work_ilocked(struct binder_thread *thread,
941 bool do_proc_work)
942 {
943 return !binder_worklist_empty_ilocked(&thread->todo) ||
944 thread->looper_need_return ||
945 (do_proc_work &&
946 !binder_worklist_empty_ilocked(&thread->proc->todo));
947 }
948
949 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
950 {
951 bool has_work;
952
953 binder_inner_proc_lock(thread->proc);
954 has_work = binder_has_work_ilocked(thread, do_proc_work);
955 binder_inner_proc_unlock(thread->proc);
956
957 return has_work;
958 }
959
960 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
961 {
962 return !thread->transaction_stack &&
963 binder_worklist_empty_ilocked(&thread->todo) &&
964 (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
965 BINDER_LOOPER_STATE_REGISTERED));
966 }
967
968 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
969 bool sync)
970 {
971 struct rb_node *n;
972 struct binder_thread *thread;
973
974 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
975 thread = rb_entry(n, struct binder_thread, rb_node);
976 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
977 binder_available_for_proc_work_ilocked(thread)) {
978 if (sync)
979 wake_up_interruptible_sync(&thread->wait);
980 else
981 wake_up_interruptible(&thread->wait);
982 }
983 }
984 }
985
986 /**
987 * binder_select_thread_ilocked() - selects a thread for doing proc work.
988 * @proc: process to select a thread from
989 *
990 * Note that calling this function moves the thread off the waiting_threads
991 * list, so it can only be woken up by the caller of this function, or a
992 * signal. Therefore, callers *should* always wake up the thread this function
993 * returns.
994 *
995 * Return: If there's a thread currently waiting for process work,
996 * returns that thread. Otherwise returns NULL.
997 */
998 static struct binder_thread *
999 binder_select_thread_ilocked(struct binder_proc *proc)
1000 {
1001 struct binder_thread *thread;
1002
1003 assert_spin_locked(&proc->inner_lock);
1004 thread = list_first_entry_or_null(&proc->waiting_threads,
1005 struct binder_thread,
1006 waiting_thread_node);
1007
1008 if (thread)
1009 list_del_init(&thread->waiting_thread_node);
1010
1011 return thread;
1012 }
1013
1014 /**
1015 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
1016 * @proc: process to wake up a thread in
1017 * @thread: specific thread to wake-up (may be NULL)
1018 * @sync: whether to do a synchronous wake-up
1019 *
1020 * This function wakes up a thread in the @proc process.
1021 * The caller may provide a specific thread to wake-up in
1022 * the @thread parameter. If @thread is NULL, this function
1023 * will wake up threads that have called poll().
1024 *
1025 * Note that for this function to work as expected, callers
1026 * should first call binder_select_thread() to find a thread
1027 * to handle the work (if they don't have a thread already),
1028 * and pass the result into the @thread parameter.
1029 */
1030 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
1031 struct binder_thread *thread,
1032 bool sync)
1033 {
1034 assert_spin_locked(&proc->inner_lock);
1035
1036 if (thread) {
1037 if (sync)
1038 wake_up_interruptible_sync(&thread->wait);
1039 else
1040 wake_up_interruptible(&thread->wait);
1041 return;
1042 }
1043
1044 /* Didn't find a thread waiting for proc work; this can happen
1045 * in two scenarios:
1046 * 1. All threads are busy handling transactions
1047 * In that case, one of those threads should call back into
1048 * the kernel driver soon and pick up this work.
1049 * 2. Threads are using the (e)poll interface, in which case
1050 * they may be blocked on the waitqueue without having been
1051 * added to waiting_threads. For this case, we just iterate
1052 * over all threads not handling transaction work, and
1053 * wake them all up. We wake all because we don't know whether
1054 * a thread that called into (e)poll is handling non-binder
1055 * work currently.
1056 */
1057 binder_wakeup_poll_threads_ilocked(proc, sync);
1058 }
1059
1060 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1061 {
1062 struct binder_thread *thread = binder_select_thread_ilocked(proc);
1063
1064 binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
1065 }
1066
1067 static void binder_set_nice(long nice)
1068 {
1069 long min_nice;
1070
1071 if (can_nice(current, nice)) {
1072 set_user_nice(current, nice);
1073 return;
1074 }
1075 min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
1076 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
1077 "%d: nice value %ld not allowed use %ld instead\n",
1078 current->pid, nice, min_nice);
1079 set_user_nice(current, min_nice);
1080 if (min_nice <= MAX_NICE)
1081 return;
1082 binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
1083 }
1084
1085 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1086 binder_uintptr_t ptr)
1087 {
1088 struct rb_node *n = proc->nodes.rb_node;
1089 struct binder_node *node;
1090
1091 assert_spin_locked(&proc->inner_lock);
1092
1093 while (n) {
1094 node = rb_entry(n, struct binder_node, rb_node);
1095
1096 if (ptr < node->ptr)
1097 n = n->rb_left;
1098 else if (ptr > node->ptr)
1099 n = n->rb_right;
1100 else {
1101 /*
1102 * take an implicit weak reference
1103 * to ensure node stays alive until
1104 * call to binder_put_node()
1105 */
1106 binder_inc_node_tmpref_ilocked(node);
1107 return node;
1108 }
1109 }
1110 return NULL;
1111 }
1112
1113 static struct binder_node *binder_get_node(struct binder_proc *proc,
1114 binder_uintptr_t ptr)
1115 {
1116 struct binder_node *node;
1117
1118 binder_inner_proc_lock(proc);
1119 node = binder_get_node_ilocked(proc, ptr);
1120 binder_inner_proc_unlock(proc);
1121 return node;
1122 }
1123
1124 static struct binder_node *binder_init_node_ilocked(
1125 struct binder_proc *proc,
1126 struct binder_node *new_node,
1127 struct flat_binder_object *fp)
1128 {
1129 struct rb_node **p = &proc->nodes.rb_node;
1130 struct rb_node *parent = NULL;
1131 struct binder_node *node;
1132 binder_uintptr_t ptr = fp ? fp->binder : 0;
1133 binder_uintptr_t cookie = fp ? fp->cookie : 0;
1134 __u32 flags = fp ? fp->flags : 0;
1135
1136 assert_spin_locked(&proc->inner_lock);
1137
1138 while (*p) {
1139
1140 parent = *p;
1141 node = rb_entry(parent, struct binder_node, rb_node);
1142
1143 if (ptr < node->ptr)
1144 p = &(*p)->rb_left;
1145 else if (ptr > node->ptr)
1146 p = &(*p)->rb_right;
1147 else {
1148 /*
1149 * A matching node is already in
1150 * the rb tree. Abandon the init
1151 * and return it.
1152 */
1153 binder_inc_node_tmpref_ilocked(node);
1154 return node;
1155 }
1156 }
1157 node = new_node;
1158 binder_stats_created(BINDER_STAT_NODE);
1159 node->tmp_refs++;
1160 rb_link_node(&node->rb_node, parent, p);
1161 rb_insert_color(&node->rb_node, &proc->nodes);
1162 node->debug_id = atomic_inc_return(&binder_last_id);
1163 node->proc = proc;
1164 node->ptr = ptr;
1165 node->cookie = cookie;
1166 node->work.type = BINDER_WORK_NODE;
1167 node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1168 node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1169 spin_lock_init(&node->lock);
1170 INIT_LIST_HEAD(&node->work.entry);
1171 INIT_LIST_HEAD(&node->async_todo);
1172 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1173 "%d:%d node %d u%016llx c%016llx created\n",
1174 proc->pid, current->pid, node->debug_id,
1175 (u64)node->ptr, (u64)node->cookie);
1176
1177 return node;
1178 }
1179
1180 static struct binder_node *binder_new_node(struct binder_proc *proc,
1181 struct flat_binder_object *fp)
1182 {
1183 struct binder_node *node;
1184 struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1185
1186 if (!new_node)
1187 return NULL;
1188 binder_inner_proc_lock(proc);
1189 node = binder_init_node_ilocked(proc, new_node, fp);
1190 binder_inner_proc_unlock(proc);
1191 if (node != new_node)
1192 /*
1193 * The node was already added by another thread
1194 */
1195 kfree(new_node);
1196
1197 return node;
1198 }
1199
1200 static void binder_free_node(struct binder_node *node)
1201 {
1202 kfree(node);
1203 binder_stats_deleted(BINDER_STAT_NODE);
1204 }
1205
1206 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1207 int internal,
1208 struct list_head *target_list)
1209 {
1210 struct binder_proc *proc = node->proc;
1211
1212 assert_spin_locked(&node->lock);
1213 if (proc)
1214 assert_spin_locked(&proc->inner_lock);
1215 if (strong) {
1216 if (internal) {
1217 if (target_list == NULL &&
1218 node->internal_strong_refs == 0 &&
1219 !(node->proc &&
1220 node == node->proc->context->binder_context_mgr_node &&
1221 node->has_strong_ref)) {
1222 pr_err("invalid inc strong node for %d\n",
1223 node->debug_id);
1224 return -EINVAL;
1225 }
1226 node->internal_strong_refs++;
1227 } else
1228 node->local_strong_refs++;
1229 if (!node->has_strong_ref && target_list) {
1230 binder_dequeue_work_ilocked(&node->work);
1231 binder_enqueue_work_ilocked(&node->work, target_list);
1232 }
1233 } else {
1234 if (!internal)
1235 node->local_weak_refs++;
1236 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1237 if (target_list == NULL) {
1238 pr_err("invalid inc weak node for %d\n",
1239 node->debug_id);
1240 return -EINVAL;
1241 }
1242 binder_enqueue_work_ilocked(&node->work, target_list);
1243 }
1244 }
1245 return 0;
1246 }
1247
1248 static int binder_inc_node(struct binder_node *node, int strong, int internal,
1249 struct list_head *target_list)
1250 {
1251 int ret;
1252
1253 binder_node_inner_lock(node);
1254 ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1255 binder_node_inner_unlock(node);
1256
1257 return ret;
1258 }
1259
1260 static bool binder_dec_node_nilocked(struct binder_node *node,
1261 int strong, int internal)
1262 {
1263 struct binder_proc *proc = node->proc;
1264
1265 assert_spin_locked(&node->lock);
1266 if (proc)
1267 assert_spin_locked(&proc->inner_lock);
1268 if (strong) {
1269 if (internal)
1270 node->internal_strong_refs--;
1271 else
1272 node->local_strong_refs--;
1273 if (node->local_strong_refs || node->internal_strong_refs)
1274 return false;
1275 } else {
1276 if (!internal)
1277 node->local_weak_refs--;
1278 if (node->local_weak_refs || node->tmp_refs ||
1279 !hlist_empty(&node->refs))
1280 return false;
1281 }
1282
1283 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1284 if (list_empty(&node->work.entry)) {
1285 binder_enqueue_work_ilocked(&node->work, &proc->todo);
1286 binder_wakeup_proc_ilocked(proc);
1287 }
1288 } else {
1289 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1290 !node->local_weak_refs && !node->tmp_refs) {
1291 if (proc) {
1292 binder_dequeue_work_ilocked(&node->work);
1293 rb_erase(&node->rb_node, &proc->nodes);
1294 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1295 "refless node %d deleted\n",
1296 node->debug_id);
1297 } else {
1298 BUG_ON(!list_empty(&node->work.entry));
1299 spin_lock(&binder_dead_nodes_lock);
1300 /*
1301 * tmp_refs could have changed so
1302 * check it again
1303 */
1304 if (node->tmp_refs) {
1305 spin_unlock(&binder_dead_nodes_lock);
1306 return false;
1307 }
1308 hlist_del(&node->dead_node);
1309 spin_unlock(&binder_dead_nodes_lock);
1310 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1311 "dead node %d deleted\n",
1312 node->debug_id);
1313 }
1314 return true;
1315 }
1316 }
1317 return false;
1318 }
1319
1320 static void binder_dec_node(struct binder_node *node, int strong, int internal)
1321 {
1322 bool free_node;
1323
1324 binder_node_inner_lock(node);
1325 free_node = binder_dec_node_nilocked(node, strong, internal);
1326 binder_node_inner_unlock(node);
1327 if (free_node)
1328 binder_free_node(node);
1329 }
1330
1331 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
1332 {
1333 /*
1334 * No call to binder_inc_node() is needed since we
1335 * don't need to inform userspace of any changes to
1336 * tmp_refs
1337 */
1338 node->tmp_refs++;
1339 }
1340
1341 /**
1342 * binder_inc_node_tmpref() - take a temporary reference on node
1343 * @node: node to reference
1344 *
1345 * Take reference on node to prevent the node from being freed
1346 * while referenced only by a local variable. The inner lock is
1347 * needed to serialize with the node work on the queue (which
1348 * isn't needed after the node is dead). If the node is dead
1349 * (node->proc is NULL), use binder_dead_nodes_lock to protect
1350 * node->tmp_refs against dead-node-only cases where the node
1351 * lock cannot be acquired (eg traversing the dead node list to
1352 * print nodes)
1353 */
1354 static void binder_inc_node_tmpref(struct binder_node *node)
1355 {
1356 binder_node_lock(node);
1357 if (node->proc)
1358 binder_inner_proc_lock(node->proc);
1359 else
1360 spin_lock(&binder_dead_nodes_lock);
1361 binder_inc_node_tmpref_ilocked(node);
1362 if (node->proc)
1363 binder_inner_proc_unlock(node->proc);
1364 else
1365 spin_unlock(&binder_dead_nodes_lock);
1366 binder_node_unlock(node);
1367 }
1368
1369 /**
1370 * binder_dec_node_tmpref() - remove a temporary reference on node
1371 * @node: node to reference
1372 *
1373 * Release temporary reference on node taken via binder_inc_node_tmpref()
1374 */
1375 static void binder_dec_node_tmpref(struct binder_node *node)
1376 {
1377 bool free_node;
1378
1379 binder_node_inner_lock(node);
1380 if (!node->proc)
1381 spin_lock(&binder_dead_nodes_lock);
1382 node->tmp_refs--;
1383 BUG_ON(node->tmp_refs < 0);
1384 if (!node->proc)
1385 spin_unlock(&binder_dead_nodes_lock);
1386 /*
1387 * Call binder_dec_node() to check if all refcounts are 0
1388 * and cleanup is needed. Calling with strong=0 and internal=1
1389 * causes no actual reference to be released in binder_dec_node().
1390 * If that changes, a change is needed here too.
1391 */
1392 free_node = binder_dec_node_nilocked(node, 0, 1);
1393 binder_node_inner_unlock(node);
1394 if (free_node)
1395 binder_free_node(node);
1396 }
1397
1398 static void binder_put_node(struct binder_node *node)
1399 {
1400 binder_dec_node_tmpref(node);
1401 }
1402
1403 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1404 u32 desc, bool need_strong_ref)
1405 {
1406 struct rb_node *n = proc->refs_by_desc.rb_node;
1407 struct binder_ref *ref;
1408
1409 while (n) {
1410 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1411
1412 if (desc < ref->data.desc) {
1413 n = n->rb_left;
1414 } else if (desc > ref->data.desc) {
1415 n = n->rb_right;
1416 } else if (need_strong_ref && !ref->data.strong) {
1417 binder_user_error("tried to use weak ref as strong ref\n");
1418 return NULL;
1419 } else {
1420 return ref;
1421 }
1422 }
1423 return NULL;
1424 }
1425
1426 /**
1427 * binder_get_ref_for_node_olocked() - get the ref associated with given node
1428 * @proc: binder_proc that owns the ref
1429 * @node: binder_node of target
1430 * @new_ref: newly allocated binder_ref to be initialized or %NULL
1431 *
1432 * Look up the ref for the given node and return it if it exists
1433 *
1434 * If it doesn't exist and the caller provides a newly allocated
1435 * ref, initialize the fields of the newly allocated ref and insert
1436 * into the given proc rb_trees and node refs list.
1437 *
1438 * Return: the ref for node. It is possible that another thread
1439 * allocated/initialized the ref first in which case the
1440 * returned ref would be different than the passed-in
1441 * new_ref. new_ref must be kfree'd by the caller in
1442 * this case.
1443 */
1444 static struct binder_ref *binder_get_ref_for_node_olocked(
1445 struct binder_proc *proc,
1446 struct binder_node *node,
1447 struct binder_ref *new_ref)
1448 {
1449 struct binder_context *context = proc->context;
1450 struct rb_node **p = &proc->refs_by_node.rb_node;
1451 struct rb_node *parent = NULL;
1452 struct binder_ref *ref;
1453 struct rb_node *n;
1454
1455 while (*p) {
1456 parent = *p;
1457 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1458
1459 if (node < ref->node)
1460 p = &(*p)->rb_left;
1461 else if (node > ref->node)
1462 p = &(*p)->rb_right;
1463 else
1464 return ref;
1465 }
1466 if (!new_ref)
1467 return NULL;
1468
1469 binder_stats_created(BINDER_STAT_REF);
1470 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1471 new_ref->proc = proc;
1472 new_ref->node = node;
1473 rb_link_node(&new_ref->rb_node_node, parent, p);
1474 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1475
1476 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1477 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1478 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1479 if (ref->data.desc > new_ref->data.desc)
1480 break;
1481 new_ref->data.desc = ref->data.desc + 1;
1482 }
1483
1484 p = &proc->refs_by_desc.rb_node;
1485 while (*p) {
1486 parent = *p;
1487 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1488
1489 if (new_ref->data.desc < ref->data.desc)
1490 p = &(*p)->rb_left;
1491 else if (new_ref->data.desc > ref->data.desc)
1492 p = &(*p)->rb_right;
1493 else
1494 BUG();
1495 }
1496 rb_link_node(&new_ref->rb_node_desc, parent, p);
1497 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1498
1499 binder_node_lock(node);
1500 hlist_add_head(&new_ref->node_entry, &node->refs);
1501
1502 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1503 "%d new ref %d desc %d for node %d\n",
1504 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1505 node->debug_id);
1506 binder_node_unlock(node);
1507 return new_ref;
1508 }
1509
1510 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1511 {
1512 bool delete_node = false;
1513
1514 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1515 "%d delete ref %d desc %d for node %d\n",
1516 ref->proc->pid, ref->data.debug_id, ref->data.desc,
1517 ref->node->debug_id);
1518
1519 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1520 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1521
1522 binder_node_inner_lock(ref->node);
1523 if (ref->data.strong)
1524 binder_dec_node_nilocked(ref->node, 1, 1);
1525
1526 hlist_del(&ref->node_entry);
1527 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1528 binder_node_inner_unlock(ref->node);
1529 /*
1530 * Clear ref->node unless we want the caller to free the node
1531 */
1532 if (!delete_node) {
1533 /*
1534 * The caller uses ref->node to determine
1535 * whether the node needs to be freed. Clear
1536 * it since the node is still alive.
1537 */
1538 ref->node = NULL;
1539 }
1540
1541 if (ref->death) {
1542 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1543 "%d delete ref %d desc %d has death notification\n",
1544 ref->proc->pid, ref->data.debug_id,
1545 ref->data.desc);
1546 binder_dequeue_work(ref->proc, &ref->death->work);
1547 binder_stats_deleted(BINDER_STAT_DEATH);
1548 }
1549 binder_stats_deleted(BINDER_STAT_REF);
1550 }
1551
1552 /**
1553 * binder_inc_ref_olocked() - increment the ref for given handle
1554 * @ref: ref to be incremented
1555 * @strong: if true, strong increment, else weak
1556 * @target_list: list to queue node work on
1557 *
1558 * Increment the ref. @ref->proc->outer_lock must be held on entry
1559 *
1560 * Return: 0, if successful, else errno
1561 */
1562 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1563 struct list_head *target_list)
1564 {
1565 int ret;
1566
1567 if (strong) {
1568 if (ref->data.strong == 0) {
1569 ret = binder_inc_node(ref->node, 1, 1, target_list);
1570 if (ret)
1571 return ret;
1572 }
1573 ref->data.strong++;
1574 } else {
1575 if (ref->data.weak == 0) {
1576 ret = binder_inc_node(ref->node, 0, 1, target_list);
1577 if (ret)
1578 return ret;
1579 }
1580 ref->data.weak++;
1581 }
1582 return 0;
1583 }
1584
1585 /**
1586 * binder_dec_ref() - dec the ref for given handle
1587 * @ref: ref to be decremented
1588 * @strong: if true, strong decrement, else weak
1589 *
1590 * Decrement the ref.
1591 *
1592 * Return: true if ref is cleaned up and ready to be freed
1593 */
1594 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1595 {
1596 if (strong) {
1597 if (ref->data.strong == 0) {
1598 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1599 ref->proc->pid, ref->data.debug_id,
1600 ref->data.desc, ref->data.strong,
1601 ref->data.weak);
1602 return false;
1603 }
1604 ref->data.strong--;
1605 if (ref->data.strong == 0)
1606 binder_dec_node(ref->node, strong, 1);
1607 } else {
1608 if (ref->data.weak == 0) {
1609 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1610 ref->proc->pid, ref->data.debug_id,
1611 ref->data.desc, ref->data.strong,
1612 ref->data.weak);
1613 return false;
1614 }
1615 ref->data.weak--;
1616 }
1617 if (ref->data.strong == 0 && ref->data.weak == 0) {
1618 binder_cleanup_ref_olocked(ref);
1619 return true;
1620 }
1621 return false;
1622 }
1623
1624 /**
1625 * binder_get_node_from_ref() - get the node from the given proc/desc
1626 * @proc: proc containing the ref
1627 * @desc: the handle associated with the ref
1628 * @need_strong_ref: if true, only return node if ref is strong
1629 * @rdata: the id/refcount data for the ref
1630 *
1631 * Given a proc and ref handle, return the associated binder_node
1632 *
1633 * Return: a binder_node or NULL if not found or not strong when strong required
1634 */
1635 static struct binder_node *binder_get_node_from_ref(
1636 struct binder_proc *proc,
1637 u32 desc, bool need_strong_ref,
1638 struct binder_ref_data *rdata)
1639 {
1640 struct binder_node *node;
1641 struct binder_ref *ref;
1642
1643 binder_proc_lock(proc);
1644 ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1645 if (!ref)
1646 goto err_no_ref;
1647 node = ref->node;
1648 /*
1649 * Take an implicit reference on the node to ensure
1650 * it stays alive until the call to binder_put_node()
1651 */
1652 binder_inc_node_tmpref(node);
1653 if (rdata)
1654 *rdata = ref->data;
1655 binder_proc_unlock(proc);
1656
1657 return node;
1658
1659 err_no_ref:
1660 binder_proc_unlock(proc);
1661 return NULL;
1662 }
1663
1664 /**
1665 * binder_free_ref() - free the binder_ref
1666 * @ref: ref to free
1667 *
1668 * Free the binder_ref. Free the binder_node indicated by ref->node
1669 * (if non-NULL) and the binder_ref_death indicated by ref->death.
1670 */
1671 static void binder_free_ref(struct binder_ref *ref)
1672 {
1673 if (ref->node)
1674 binder_free_node(ref->node);
1675 kfree(ref->death);
1676 kfree(ref);
1677 }
1678
1679 /**
1680 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1681 * @proc: proc containing the ref
1682 * @desc: the handle associated with the ref
1683 * @increment: true=inc reference, false=dec reference
1684 * @strong: true=strong reference, false=weak reference
1685 * @rdata: the id/refcount data for the ref
1686 *
1687 * Given a proc and ref handle, increment or decrement the ref
1688 * according to "increment" arg.
1689 *
1690 * Return: 0 if successful, else errno
1691 */
1692 static int binder_update_ref_for_handle(struct binder_proc *proc,
1693 uint32_t desc, bool increment, bool strong,
1694 struct binder_ref_data *rdata)
1695 {
1696 int ret = 0;
1697 struct binder_ref *ref;
1698 bool delete_ref = false;
1699
1700 binder_proc_lock(proc);
1701 ref = binder_get_ref_olocked(proc, desc, strong);
1702 if (!ref) {
1703 ret = -EINVAL;
1704 goto err_no_ref;
1705 }
1706 if (increment)
1707 ret = binder_inc_ref_olocked(ref, strong, NULL);
1708 else
1709 delete_ref = binder_dec_ref_olocked(ref, strong);
1710
1711 if (rdata)
1712 *rdata = ref->data;
1713 binder_proc_unlock(proc);
1714
1715 if (delete_ref)
1716 binder_free_ref(ref);
1717 return ret;
1718
1719 err_no_ref:
1720 binder_proc_unlock(proc);
1721 return ret;
1722 }
1723
1724 /**
1725 * binder_dec_ref_for_handle() - dec the ref for given handle
1726 * @proc: proc containing the ref
1727 * @desc: the handle associated with the ref
1728 * @strong: true=strong reference, false=weak reference
1729 * @rdata: the id/refcount data for the ref
1730 *
1731 * Just calls binder_update_ref_for_handle() to decrement the ref.
1732 *
1733 * Return: 0 if successful, else errno
1734 */
1735 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1736 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1737 {
1738 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1739 }
1740
1741
1742 /**
1743 * binder_inc_ref_for_node() - increment the ref for given proc/node
1744 * @proc: proc containing the ref
1745 * @node: target node
1746 * @strong: true=strong reference, false=weak reference
1747 * @target_list: worklist to use if node is incremented
1748 * @rdata: the id/refcount data for the ref
1749 *
1750 * Given a proc and node, increment the ref. Create the ref if it
1751 * doesn't already exist
1752 *
1753 * Return: 0 if successful, else errno
1754 */
1755 static int binder_inc_ref_for_node(struct binder_proc *proc,
1756 struct binder_node *node,
1757 bool strong,
1758 struct list_head *target_list,
1759 struct binder_ref_data *rdata)
1760 {
1761 struct binder_ref *ref;
1762 struct binder_ref *new_ref = NULL;
1763 int ret = 0;
1764
1765 binder_proc_lock(proc);
1766 ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1767 if (!ref) {
1768 binder_proc_unlock(proc);
1769 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1770 if (!new_ref)
1771 return -ENOMEM;
1772 binder_proc_lock(proc);
1773 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1774 }
1775 ret = binder_inc_ref_olocked(ref, strong, target_list);
1776 *rdata = ref->data;
1777 binder_proc_unlock(proc);
1778 if (new_ref && ref != new_ref)
1779 /*
1780 * Another thread created the ref first so
1781 * free the one we allocated
1782 */
1783 kfree(new_ref);
1784 return ret;
1785 }
1786
1787 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
1788 struct binder_transaction *t)
1789 {
1790 BUG_ON(!target_thread);
1791 assert_spin_locked(&target_thread->proc->inner_lock);
1792 BUG_ON(target_thread->transaction_stack != t);
1793 BUG_ON(target_thread->transaction_stack->from != target_thread);
1794 target_thread->transaction_stack =
1795 target_thread->transaction_stack->from_parent;
1796 t->from = NULL;
1797 }
1798
1799 /**
1800 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1801 * @thread: thread to decrement
1802 *
1803 * A thread needs to be kept alive while being used to create or
1804 * handle a transaction. binder_get_txn_from() is used to safely
1805 * extract t->from from a binder_transaction and keep the thread
1806 * indicated by t->from from being freed. When done with that
1807 * binder_thread, this function is called to decrement the
1808 * tmp_ref and free if appropriate (thread has been released
1809 * and no transaction being processed by the driver)
1810 */
1811 static void binder_thread_dec_tmpref(struct binder_thread *thread)
1812 {
1813 /*
1814 * atomic is used to protect the counter value while
1815 * it cannot reach zero or thread->is_dead is false
1816 */
1817 binder_inner_proc_lock(thread->proc);
1818 atomic_dec(&thread->tmp_ref);
1819 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1820 binder_inner_proc_unlock(thread->proc);
1821 binder_free_thread(thread);
1822 return;
1823 }
1824 binder_inner_proc_unlock(thread->proc);
1825 }
1826
1827 /**
1828 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1829 * @proc: proc to decrement
1830 *
1831 * A binder_proc needs to be kept alive while being used to create or
1832 * handle a transaction. proc->tmp_ref is incremented when
1833 * creating a new transaction or the binder_proc is currently in-use
1834 * by threads that are being released. When done with the binder_proc,
1835 * this function is called to decrement the counter and free the
1836 * proc if appropriate (proc has been released, all threads have
1837 * been released and not currenly in-use to process a transaction).
1838 */
1839 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1840 {
1841 binder_inner_proc_lock(proc);
1842 proc->tmp_ref--;
1843 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1844 !proc->tmp_ref) {
1845 binder_inner_proc_unlock(proc);
1846 binder_free_proc(proc);
1847 return;
1848 }
1849 binder_inner_proc_unlock(proc);
1850 }
1851
1852 /**
1853 * binder_get_txn_from() - safely extract the "from" thread in transaction
1854 * @t: binder transaction for t->from
1855 *
1856 * Atomically return the "from" thread and increment the tmp_ref
1857 * count for the thread to ensure it stays alive until
1858 * binder_thread_dec_tmpref() is called.
1859 *
1860 * Return: the value of t->from
1861 */
1862 static struct binder_thread *binder_get_txn_from(
1863 struct binder_transaction *t)
1864 {
1865 struct binder_thread *from;
1866
1867 spin_lock(&t->lock);
1868 from = t->from;
1869 if (from)
1870 atomic_inc(&from->tmp_ref);
1871 spin_unlock(&t->lock);
1872 return from;
1873 }
1874
1875 /**
1876 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1877 * @t: binder transaction for t->from
1878 *
1879 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1880 * to guarantee that the thread cannot be released while operating on it.
1881 * The caller must call binder_inner_proc_unlock() to release the inner lock
1882 * as well as call binder_dec_thread_txn() to release the reference.
1883 *
1884 * Return: the value of t->from
1885 */
1886 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1887 struct binder_transaction *t)
1888 {
1889 struct binder_thread *from;
1890
1891 from = binder_get_txn_from(t);
1892 if (!from)
1893 return NULL;
1894 binder_inner_proc_lock(from->proc);
1895 if (t->from) {
1896 BUG_ON(from != t->from);
1897 return from;
1898 }
1899 binder_inner_proc_unlock(from->proc);
1900 binder_thread_dec_tmpref(from);
1901 return NULL;
1902 }
1903
1904 static void binder_free_transaction(struct binder_transaction *t)
1905 {
1906 if (t->buffer)
1907 t->buffer->transaction = NULL;
1908 kfree(t);
1909 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1910 }
1911
1912 static void binder_send_failed_reply(struct binder_transaction *t,
1913 uint32_t error_code)
1914 {
1915 struct binder_thread *target_thread;
1916 struct binder_transaction *next;
1917
1918 BUG_ON(t->flags & TF_ONE_WAY);
1919 while (1) {
1920 target_thread = binder_get_txn_from_and_acq_inner(t);
1921 if (target_thread) {
1922 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1923 "send failed reply for transaction %d to %d:%d\n",
1924 t->debug_id,
1925 target_thread->proc->pid,
1926 target_thread->pid);
1927
1928 binder_pop_transaction_ilocked(target_thread, t);
1929 if (target_thread->reply_error.cmd == BR_OK) {
1930 target_thread->reply_error.cmd = error_code;
1931 binder_enqueue_work_ilocked(
1932 &target_thread->reply_error.work,
1933 &target_thread->todo);
1934 wake_up_interruptible(&target_thread->wait);
1935 } else {
1936 /*
1937 * Cannot get here for normal operation, but
1938 * we can if multiple synchronous transactions
1939 * are sent without blocking for responses.
1940 * Just ignore the 2nd error in this case.
1941 */
1942 pr_warn("Unexpected reply error: %u\n",
1943 target_thread->reply_error.cmd);
1944 }
1945 binder_inner_proc_unlock(target_thread->proc);
1946 binder_thread_dec_tmpref(target_thread);
1947 binder_free_transaction(t);
1948 return;
1949 }
1950 next = t->from_parent;
1951
1952 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1953 "send failed reply for transaction %d, target dead\n",
1954 t->debug_id);
1955
1956 binder_free_transaction(t);
1957 if (next == NULL) {
1958 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1959 "reply failed, no target thread at root\n");
1960 return;
1961 }
1962 t = next;
1963 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1964 "reply failed, no target thread -- retry %d\n",
1965 t->debug_id);
1966 }
1967 }
1968
1969 /**
1970 * binder_cleanup_transaction() - cleans up undelivered transaction
1971 * @t: transaction that needs to be cleaned up
1972 * @reason: reason the transaction wasn't delivered
1973 * @error_code: error to return to caller (if synchronous call)
1974 */
1975 static void binder_cleanup_transaction(struct binder_transaction *t,
1976 const char *reason,
1977 uint32_t error_code)
1978 {
1979 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1980 binder_send_failed_reply(t, error_code);
1981 } else {
1982 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1983 "undelivered transaction %d, %s\n",
1984 t->debug_id, reason);
1985 binder_free_transaction(t);
1986 }
1987 }
1988
1989 /**
1990 * binder_validate_object() - checks for a valid metadata object in a buffer.
1991 * @buffer: binder_buffer that we're parsing.
1992 * @offset: offset in the buffer at which to validate an object.
1993 *
1994 * Return: If there's a valid metadata object at @offset in @buffer, the
1995 * size of that object. Otherwise, it returns zero.
1996 */
1997 static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
1998 {
1999 /* Check if we can read a header first */
2000 struct binder_object_header *hdr;
2001 size_t object_size = 0;
2002
2003 if (offset > buffer->data_size - sizeof(*hdr) ||
2004 buffer->data_size < sizeof(*hdr) ||
2005 !IS_ALIGNED(offset, sizeof(u32)))
2006 return 0;
2007
2008 /* Ok, now see if we can read a complete object. */
2009 hdr = (struct binder_object_header *)(buffer->data + offset);
2010 switch (hdr->type) {
2011 case BINDER_TYPE_BINDER:
2012 case BINDER_TYPE_WEAK_BINDER:
2013 case BINDER_TYPE_HANDLE:
2014 case BINDER_TYPE_WEAK_HANDLE:
2015 object_size = sizeof(struct flat_binder_object);
2016 break;
2017 case BINDER_TYPE_FD:
2018 object_size = sizeof(struct binder_fd_object);
2019 break;
2020 case BINDER_TYPE_PTR:
2021 object_size = sizeof(struct binder_buffer_object);
2022 break;
2023 case BINDER_TYPE_FDA:
2024 object_size = sizeof(struct binder_fd_array_object);
2025 break;
2026 default:
2027 return 0;
2028 }
2029 if (offset <= buffer->data_size - object_size &&
2030 buffer->data_size >= object_size)
2031 return object_size;
2032 else
2033 return 0;
2034 }
2035
2036 /**
2037 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2038 * @b: binder_buffer containing the object
2039 * @index: index in offset array at which the binder_buffer_object is
2040 * located
2041 * @start: points to the start of the offset array
2042 * @num_valid: the number of valid offsets in the offset array
2043 *
2044 * Return: If @index is within the valid range of the offset array
2045 * described by @start and @num_valid, and if there's a valid
2046 * binder_buffer_object at the offset found in index @index
2047 * of the offset array, that object is returned. Otherwise,
2048 * %NULL is returned.
2049 * Note that the offset found in index @index itself is not
2050 * verified; this function assumes that @num_valid elements
2051 * from @start were previously verified to have valid offsets.
2052 */
2053 static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
2054 binder_size_t index,
2055 binder_size_t *start,
2056 binder_size_t num_valid)
2057 {
2058 struct binder_buffer_object *buffer_obj;
2059 binder_size_t *offp;
2060
2061 if (index >= num_valid)
2062 return NULL;
2063
2064 offp = start + index;
2065 buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
2066 if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
2067 return NULL;
2068
2069 return buffer_obj;
2070 }
2071
2072 /**
2073 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2074 * @b: transaction buffer
2075 * @objects_start start of objects buffer
2076 * @buffer: binder_buffer_object in which to fix up
2077 * @offset: start offset in @buffer to fix up
2078 * @last_obj: last binder_buffer_object that we fixed up in
2079 * @last_min_offset: minimum fixup offset in @last_obj
2080 *
2081 * Return: %true if a fixup in buffer @buffer at offset @offset is
2082 * allowed.
2083 *
2084 * For safety reasons, we only allow fixups inside a buffer to happen
2085 * at increasing offsets; additionally, we only allow fixup on the last
2086 * buffer object that was verified, or one of its parents.
2087 *
2088 * Example of what is allowed:
2089 *
2090 * A
2091 * B (parent = A, offset = 0)
2092 * C (parent = A, offset = 16)
2093 * D (parent = C, offset = 0)
2094 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2095 *
2096 * Examples of what is not allowed:
2097 *
2098 * Decreasing offsets within the same parent:
2099 * A
2100 * C (parent = A, offset = 16)
2101 * B (parent = A, offset = 0) // decreasing offset within A
2102 *
2103 * Referring to a parent that wasn't the last object or any of its parents:
2104 * A
2105 * B (parent = A, offset = 0)
2106 * C (parent = A, offset = 0)
2107 * C (parent = A, offset = 16)
2108 * D (parent = B, offset = 0) // B is not A or any of A's parents
2109 */
2110 static bool binder_validate_fixup(struct binder_buffer *b,
2111 binder_size_t *objects_start,
2112 struct binder_buffer_object *buffer,
2113 binder_size_t fixup_offset,
2114 struct binder_buffer_object *last_obj,
2115 binder_size_t last_min_offset)
2116 {
2117 if (!last_obj) {
2118 /* Nothing to fix up in */
2119 return false;
2120 }
2121
2122 while (last_obj != buffer) {
2123 /*
2124 * Safe to retrieve the parent of last_obj, since it
2125 * was already previously verified by the driver.
2126 */
2127 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2128 return false;
2129 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
2130 last_obj = (struct binder_buffer_object *)
2131 (b->data + *(objects_start + last_obj->parent));
2132 }
2133 return (fixup_offset >= last_min_offset);
2134 }
2135
2136 static void binder_transaction_buffer_release(struct binder_proc *proc,
2137 struct binder_buffer *buffer,
2138 binder_size_t *failed_at)
2139 {
2140 binder_size_t *offp, *off_start, *off_end;
2141 int debug_id = buffer->debug_id;
2142
2143 binder_debug(BINDER_DEBUG_TRANSACTION,
2144 "%d buffer release %d, size %zd-%zd, failed at %p\n",
2145 proc->pid, buffer->debug_id,
2146 buffer->data_size, buffer->offsets_size, failed_at);
2147
2148 if (buffer->target_node)
2149 binder_dec_node(buffer->target_node, 1, 0);
2150
2151 off_start = (binder_size_t *)(buffer->data +
2152 ALIGN(buffer->data_size, sizeof(void *)));
2153 if (failed_at)
2154 off_end = failed_at;
2155 else
2156 off_end = (void *)off_start + buffer->offsets_size;
2157 for (offp = off_start; offp < off_end; offp++) {
2158 struct binder_object_header *hdr;
2159 size_t object_size = binder_validate_object(buffer, *offp);
2160
2161 if (object_size == 0) {
2162 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2163 debug_id, (u64)*offp, buffer->data_size);
2164 continue;
2165 }
2166 hdr = (struct binder_object_header *)(buffer->data + *offp);
2167 switch (hdr->type) {
2168 case BINDER_TYPE_BINDER:
2169 case BINDER_TYPE_WEAK_BINDER: {
2170 struct flat_binder_object *fp;
2171 struct binder_node *node;
2172
2173 fp = to_flat_binder_object(hdr);
2174 node = binder_get_node(proc, fp->binder);
2175 if (node == NULL) {
2176 pr_err("transaction release %d bad node %016llx\n",
2177 debug_id, (u64)fp->binder);
2178 break;
2179 }
2180 binder_debug(BINDER_DEBUG_TRANSACTION,
2181 " node %d u%016llx\n",
2182 node->debug_id, (u64)node->ptr);
2183 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2184 0);
2185 binder_put_node(node);
2186 } break;
2187 case BINDER_TYPE_HANDLE:
2188 case BINDER_TYPE_WEAK_HANDLE: {
2189 struct flat_binder_object *fp;
2190 struct binder_ref_data rdata;
2191 int ret;
2192
2193 fp = to_flat_binder_object(hdr);
2194 ret = binder_dec_ref_for_handle(proc, fp->handle,
2195 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2196
2197 if (ret) {
2198 pr_err("transaction release %d bad handle %d, ret = %d\n",
2199 debug_id, fp->handle, ret);
2200 break;
2201 }
2202 binder_debug(BINDER_DEBUG_TRANSACTION,
2203 " ref %d desc %d\n",
2204 rdata.debug_id, rdata.desc);
2205 } break;
2206
2207 case BINDER_TYPE_FD: {
2208 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2209
2210 binder_debug(BINDER_DEBUG_TRANSACTION,
2211 " fd %d\n", fp->fd);
2212 if (failed_at)
2213 task_close_fd(proc, fp->fd);
2214 } break;
2215 case BINDER_TYPE_PTR:
2216 /*
2217 * Nothing to do here, this will get cleaned up when the
2218 * transaction buffer gets freed
2219 */
2220 break;
2221 case BINDER_TYPE_FDA: {
2222 struct binder_fd_array_object *fda;
2223 struct binder_buffer_object *parent;
2224 uintptr_t parent_buffer;
2225 u32 *fd_array;
2226 size_t fd_index;
2227 binder_size_t fd_buf_size;
2228
2229 fda = to_binder_fd_array_object(hdr);
2230 parent = binder_validate_ptr(buffer, fda->parent,
2231 off_start,
2232 offp - off_start);
2233 if (!parent) {
2234 pr_err("transaction release %d bad parent offset",
2235 debug_id);
2236 continue;
2237 }
2238 /*
2239 * Since the parent was already fixed up, convert it
2240 * back to kernel address space to access it
2241 */
2242 parent_buffer = parent->buffer -
2243 binder_alloc_get_user_buffer_offset(
2244 &proc->alloc);
2245
2246 fd_buf_size = sizeof(u32) * fda->num_fds;
2247 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2248 pr_err("transaction release %d invalid number of fds (%lld)\n",
2249 debug_id, (u64)fda->num_fds);
2250 continue;
2251 }
2252 if (fd_buf_size > parent->length ||
2253 fda->parent_offset > parent->length - fd_buf_size) {
2254 /* No space for all file descriptors here. */
2255 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2256 debug_id, (u64)fda->num_fds);
2257 continue;
2258 }
2259 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2260 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2261 task_close_fd(proc, fd_array[fd_index]);
2262 } break;
2263 default:
2264 pr_err("transaction release %d bad object type %x\n",
2265 debug_id, hdr->type);
2266 break;
2267 }
2268 }
2269 }
2270
2271 static int binder_translate_binder(struct flat_binder_object *fp,
2272 struct binder_transaction *t,
2273 struct binder_thread *thread)
2274 {
2275 struct binder_node *node;
2276 struct binder_proc *proc = thread->proc;
2277 struct binder_proc *target_proc = t->to_proc;
2278 struct binder_ref_data rdata;
2279 int ret = 0;
2280
2281 node = binder_get_node(proc, fp->binder);
2282 if (!node) {
2283 node = binder_new_node(proc, fp);
2284 if (!node)
2285 return -ENOMEM;
2286 }
2287 if (fp->cookie != node->cookie) {
2288 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2289 proc->pid, thread->pid, (u64)fp->binder,
2290 node->debug_id, (u64)fp->cookie,
2291 (u64)node->cookie);
2292 ret = -EINVAL;
2293 goto done;
2294 }
2295 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2296 ret = -EPERM;
2297 goto done;
2298 }
2299
2300 ret = binder_inc_ref_for_node(target_proc, node,
2301 fp->hdr.type == BINDER_TYPE_BINDER,
2302 &thread->todo, &rdata);
2303 if (ret)
2304 goto done;
2305
2306 if (fp->hdr.type == BINDER_TYPE_BINDER)
2307 fp->hdr.type = BINDER_TYPE_HANDLE;
2308 else
2309 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2310 fp->binder = 0;
2311 fp->handle = rdata.desc;
2312 fp->cookie = 0;
2313
2314 trace_binder_transaction_node_to_ref(t, node, &rdata);
2315 binder_debug(BINDER_DEBUG_TRANSACTION,
2316 " node %d u%016llx -> ref %d desc %d\n",
2317 node->debug_id, (u64)node->ptr,
2318 rdata.debug_id, rdata.desc);
2319 done:
2320 binder_put_node(node);
2321 return ret;
2322 }
2323
2324 static int binder_translate_handle(struct flat_binder_object *fp,
2325 struct binder_transaction *t,
2326 struct binder_thread *thread)
2327 {
2328 struct binder_proc *proc = thread->proc;
2329 struct binder_proc *target_proc = t->to_proc;
2330 struct binder_node *node;
2331 struct binder_ref_data src_rdata;
2332 int ret = 0;
2333
2334 node = binder_get_node_from_ref(proc, fp->handle,
2335 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2336 if (!node) {
2337 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2338 proc->pid, thread->pid, fp->handle);
2339 return -EINVAL;
2340 }
2341 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2342 ret = -EPERM;
2343 goto done;
2344 }
2345
2346 binder_node_lock(node);
2347 if (node->proc == target_proc) {
2348 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2349 fp->hdr.type = BINDER_TYPE_BINDER;
2350 else
2351 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2352 fp->binder = node->ptr;
2353 fp->cookie = node->cookie;
2354 if (node->proc)
2355 binder_inner_proc_lock(node->proc);
2356 binder_inc_node_nilocked(node,
2357 fp->hdr.type == BINDER_TYPE_BINDER,
2358 0, NULL);
2359 if (node->proc)
2360 binder_inner_proc_unlock(node->proc);
2361 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2362 binder_debug(BINDER_DEBUG_TRANSACTION,
2363 " ref %d desc %d -> node %d u%016llx\n",
2364 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2365 (u64)node->ptr);
2366 binder_node_unlock(node);
2367 } else {
2368 struct binder_ref_data dest_rdata;
2369
2370 binder_node_unlock(node);
2371 ret = binder_inc_ref_for_node(target_proc, node,
2372 fp->hdr.type == BINDER_TYPE_HANDLE,
2373 NULL, &dest_rdata);
2374 if (ret)
2375 goto done;
2376
2377 fp->binder = 0;
2378 fp->handle = dest_rdata.desc;
2379 fp->cookie = 0;
2380 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2381 &dest_rdata);
2382 binder_debug(BINDER_DEBUG_TRANSACTION,
2383 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2384 src_rdata.debug_id, src_rdata.desc,
2385 dest_rdata.debug_id, dest_rdata.desc,
2386 node->debug_id);
2387 }
2388 done:
2389 binder_put_node(node);
2390 return ret;
2391 }
2392
2393 static int binder_translate_fd(int fd,
2394 struct binder_transaction *t,
2395 struct binder_thread *thread,
2396 struct binder_transaction *in_reply_to)
2397 {
2398 struct binder_proc *proc = thread->proc;
2399 struct binder_proc *target_proc = t->to_proc;
2400 int target_fd;
2401 struct file *file;
2402 int ret;
2403 bool target_allows_fd;
2404
2405 if (in_reply_to)
2406 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2407 else
2408 target_allows_fd = t->buffer->target_node->accept_fds;
2409 if (!target_allows_fd) {
2410 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2411 proc->pid, thread->pid,
2412 in_reply_to ? "reply" : "transaction",
2413 fd);
2414 ret = -EPERM;
2415 goto err_fd_not_accepted;
2416 }
2417
2418 file = fget(fd);
2419 if (!file) {
2420 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2421 proc->pid, thread->pid, fd);
2422 ret = -EBADF;
2423 goto err_fget;
2424 }
2425 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2426 if (ret < 0) {
2427 ret = -EPERM;
2428 goto err_security;
2429 }
2430
2431 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2432 if (target_fd < 0) {
2433 ret = -ENOMEM;
2434 goto err_get_unused_fd;
2435 }
2436 task_fd_install(target_proc, target_fd, file);
2437 trace_binder_transaction_fd(t, fd, target_fd);
2438 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
2439 fd, target_fd);
2440
2441 return target_fd;
2442
2443 err_get_unused_fd:
2444 err_security:
2445 fput(file);
2446 err_fget:
2447 err_fd_not_accepted:
2448 return ret;
2449 }
2450
2451 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2452 struct binder_buffer_object *parent,
2453 struct binder_transaction *t,
2454 struct binder_thread *thread,
2455 struct binder_transaction *in_reply_to)
2456 {
2457 binder_size_t fdi, fd_buf_size, num_installed_fds;
2458 int target_fd;
2459 uintptr_t parent_buffer;
2460 u32 *fd_array;
2461 struct binder_proc *proc = thread->proc;
2462 struct binder_proc *target_proc = t->to_proc;
2463
2464 fd_buf_size = sizeof(u32) * fda->num_fds;
2465 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2466 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2467 proc->pid, thread->pid, (u64)fda->num_fds);
2468 return -EINVAL;
2469 }
2470 if (fd_buf_size > parent->length ||
2471 fda->parent_offset > parent->length - fd_buf_size) {
2472 /* No space for all file descriptors here. */
2473 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2474 proc->pid, thread->pid, (u64)fda->num_fds);
2475 return -EINVAL;
2476 }
2477 /*
2478 * Since the parent was already fixed up, convert it
2479 * back to the kernel address space to access it
2480 */
2481 parent_buffer = parent->buffer -
2482 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
2483 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2484 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2485 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2486 proc->pid, thread->pid);
2487 return -EINVAL;
2488 }
2489 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2490 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
2491 in_reply_to);
2492 if (target_fd < 0)
2493 goto err_translate_fd_failed;
2494 fd_array[fdi] = target_fd;
2495 }
2496 return 0;
2497
2498 err_translate_fd_failed:
2499 /*
2500 * Failed to allocate fd or security error, free fds
2501 * installed so far.
2502 */
2503 num_installed_fds = fdi;
2504 for (fdi = 0; fdi < num_installed_fds; fdi++)
2505 task_close_fd(target_proc, fd_array[fdi]);
2506 return target_fd;
2507 }
2508
2509 static int binder_fixup_parent(struct binder_transaction *t,
2510 struct binder_thread *thread,
2511 struct binder_buffer_object *bp,
2512 binder_size_t *off_start,
2513 binder_size_t num_valid,
2514 struct binder_buffer_object *last_fixup_obj,
2515 binder_size_t last_fixup_min_off)
2516 {
2517 struct binder_buffer_object *parent;
2518 u8 *parent_buffer;
2519 struct binder_buffer *b = t->buffer;
2520 struct binder_proc *proc = thread->proc;
2521 struct binder_proc *target_proc = t->to_proc;
2522
2523 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2524 return 0;
2525
2526 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2527 if (!parent) {
2528 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2529 proc->pid, thread->pid);
2530 return -EINVAL;
2531 }
2532
2533 if (!binder_validate_fixup(b, off_start,
2534 parent, bp->parent_offset,
2535 last_fixup_obj,
2536 last_fixup_min_off)) {
2537 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2538 proc->pid, thread->pid);
2539 return -EINVAL;
2540 }
2541
2542 if (parent->length < sizeof(binder_uintptr_t) ||
2543 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2544 /* No space for a pointer here! */
2545 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2546 proc->pid, thread->pid);
2547 return -EINVAL;
2548 }
2549 parent_buffer = (u8 *)((uintptr_t)parent->buffer -
2550 binder_alloc_get_user_buffer_offset(
2551 &target_proc->alloc));
2552 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2553
2554 return 0;
2555 }
2556
2557 /**
2558 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2559 * @t: transaction to send
2560 * @proc: process to send the transaction to
2561 * @thread: thread in @proc to send the transaction to (may be NULL)
2562 *
2563 * This function queues a transaction to the specified process. It will try
2564 * to find a thread in the target process to handle the transaction and
2565 * wake it up. If no thread is found, the work is queued to the proc
2566 * waitqueue.
2567 *
2568 * If the @thread parameter is not NULL, the transaction is always queued
2569 * to the waitlist of that specific thread.
2570 *
2571 * Return: true if the transactions was successfully queued
2572 * false if the target process or thread is dead
2573 */
2574 static bool binder_proc_transaction(struct binder_transaction *t,
2575 struct binder_proc *proc,
2576 struct binder_thread *thread)
2577 {
2578 struct list_head *target_list = NULL;
2579 struct binder_node *node = t->buffer->target_node;
2580 bool oneway = !!(t->flags & TF_ONE_WAY);
2581 bool wakeup = true;
2582
2583 BUG_ON(!node);
2584 binder_node_lock(node);
2585 if (oneway) {
2586 BUG_ON(thread);
2587 if (node->has_async_transaction) {
2588 target_list = &node->async_todo;
2589 wakeup = false;
2590 } else {
2591 node->has_async_transaction = 1;
2592 }
2593 }
2594
2595 binder_inner_proc_lock(proc);
2596
2597 if (proc->is_dead || (thread && thread->is_dead)) {
2598 binder_inner_proc_unlock(proc);
2599 binder_node_unlock(node);
2600 return false;
2601 }
2602
2603 if (!thread && !target_list)
2604 thread = binder_select_thread_ilocked(proc);
2605
2606 if (thread)
2607 target_list = &thread->todo;
2608 else if (!target_list)
2609 target_list = &proc->todo;
2610 else
2611 BUG_ON(target_list != &node->async_todo);
2612
2613 binder_enqueue_work_ilocked(&t->work, target_list);
2614
2615 if (wakeup)
2616 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2617
2618 binder_inner_proc_unlock(proc);
2619 binder_node_unlock(node);
2620
2621 return true;
2622 }
2623
2624 /**
2625 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2626 * @node: struct binder_node for which to get refs
2627 * @proc: returns @node->proc if valid
2628 * @error: if no @proc then returns BR_DEAD_REPLY
2629 *
2630 * User-space normally keeps the node alive when creating a transaction
2631 * since it has a reference to the target. The local strong ref keeps it
2632 * alive if the sending process dies before the target process processes
2633 * the transaction. If the source process is malicious or has a reference
2634 * counting bug, relying on the local strong ref can fail.
2635 *
2636 * Since user-space can cause the local strong ref to go away, we also take
2637 * a tmpref on the node to ensure it survives while we are constructing
2638 * the transaction. We also need a tmpref on the proc while we are
2639 * constructing the transaction, so we take that here as well.
2640 *
2641 * Return: The target_node with refs taken or NULL if no @node->proc is NULL.
2642 * Also sets @proc if valid. If the @node->proc is NULL indicating that the
2643 * target proc has died, @error is set to BR_DEAD_REPLY
2644 */
2645 static struct binder_node *binder_get_node_refs_for_txn(
2646 struct binder_node *node,
2647 struct binder_proc **procp,
2648 uint32_t *error)
2649 {
2650 struct binder_node *target_node = NULL;
2651
2652 binder_node_inner_lock(node);
2653 if (node->proc) {
2654 target_node = node;
2655 binder_inc_node_nilocked(node, 1, 0, NULL);
2656 binder_inc_node_tmpref_ilocked(node);
2657 node->proc->tmp_ref++;
2658 *procp = node->proc;
2659 } else
2660 *error = BR_DEAD_REPLY;
2661 binder_node_inner_unlock(node);
2662
2663 return target_node;
2664 }
2665
2666 static void binder_transaction(struct binder_proc *proc,
2667 struct binder_thread *thread,
2668 struct binder_transaction_data *tr, int reply,
2669 binder_size_t extra_buffers_size)
2670 {
2671 int ret;
2672 struct binder_transaction *t;
2673 struct binder_work *tcomplete;
2674 binder_size_t *offp, *off_end, *off_start;
2675 binder_size_t off_min;
2676 u8 *sg_bufp, *sg_buf_end;
2677 struct binder_proc *target_proc = NULL;
2678 struct binder_thread *target_thread = NULL;
2679 struct binder_node *target_node = NULL;
2680 struct binder_transaction *in_reply_to = NULL;
2681 struct binder_transaction_log_entry *e;
2682 uint32_t return_error = 0;
2683 uint32_t return_error_param = 0;
2684 uint32_t return_error_line = 0;
2685 struct binder_buffer_object *last_fixup_obj = NULL;
2686 binder_size_t last_fixup_min_off = 0;
2687 struct binder_context *context = proc->context;
2688 int t_debug_id = atomic_inc_return(&binder_last_id);
2689
2690 e = binder_transaction_log_add(&binder_transaction_log);
2691 e->debug_id = t_debug_id;
2692 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2693 e->from_proc = proc->pid;
2694 e->from_thread = thread->pid;
2695 e->target_handle = tr->target.handle;
2696 e->data_size = tr->data_size;
2697 e->offsets_size = tr->offsets_size;
2698 e->context_name = proc->context->name;
2699
2700 if (reply) {
2701 binder_inner_proc_lock(proc);
2702 in_reply_to = thread->transaction_stack;
2703 if (in_reply_to == NULL) {
2704 binder_inner_proc_unlock(proc);
2705 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2706 proc->pid, thread->pid);
2707 return_error = BR_FAILED_REPLY;
2708 return_error_param = -EPROTO;
2709 return_error_line = __LINE__;
2710 goto err_empty_call_stack;
2711 }
2712 if (in_reply_to->to_thread != thread) {
2713 spin_lock(&in_reply_to->lock);
2714 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2715 proc->pid, thread->pid, in_reply_to->debug_id,
2716 in_reply_to->to_proc ?
2717 in_reply_to->to_proc->pid : 0,
2718 in_reply_to->to_thread ?
2719 in_reply_to->to_thread->pid : 0);
2720 spin_unlock(&in_reply_to->lock);
2721 binder_inner_proc_unlock(proc);
2722 return_error = BR_FAILED_REPLY;
2723 return_error_param = -EPROTO;
2724 return_error_line = __LINE__;
2725 in_reply_to = NULL;
2726 goto err_bad_call_stack;
2727 }
2728 thread->transaction_stack = in_reply_to->to_parent;
2729 binder_inner_proc_unlock(proc);
2730 binder_set_nice(in_reply_to->saved_priority);
2731 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2732 if (target_thread == NULL) {
2733 return_error = BR_DEAD_REPLY;
2734 return_error_line = __LINE__;
2735 goto err_dead_binder;
2736 }
2737 if (target_thread->transaction_stack != in_reply_to) {
2738 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2739 proc->pid, thread->pid,
2740 target_thread->transaction_stack ?
2741 target_thread->transaction_stack->debug_id : 0,
2742 in_reply_to->debug_id);
2743 binder_inner_proc_unlock(target_thread->proc);
2744 return_error = BR_FAILED_REPLY;
2745 return_error_param = -EPROTO;
2746 return_error_line = __LINE__;
2747 in_reply_to = NULL;
2748 target_thread = NULL;
2749 goto err_dead_binder;
2750 }
2751 target_proc = target_thread->proc;
2752 target_proc->tmp_ref++;
2753 binder_inner_proc_unlock(target_thread->proc);
2754 } else {
2755 if (tr->target.handle) {
2756 struct binder_ref *ref;
2757
2758 /*
2759 * There must already be a strong ref
2760 * on this node. If so, do a strong
2761 * increment on the node to ensure it
2762 * stays alive until the transaction is
2763 * done.
2764 */
2765 binder_proc_lock(proc);
2766 ref = binder_get_ref_olocked(proc, tr->target.handle,
2767 true);
2768 if (ref) {
2769 target_node = binder_get_node_refs_for_txn(
2770 ref->node, &target_proc,
2771 &return_error);
2772 } else {
2773 binder_user_error("%d:%d got transaction to invalid handle\n",
2774 proc->pid, thread->pid);
2775 return_error = BR_FAILED_REPLY;
2776 }
2777 binder_proc_unlock(proc);
2778 } else {
2779 mutex_lock(&context->context_mgr_node_lock);
2780 target_node = context->binder_context_mgr_node;
2781 if (target_node)
2782 target_node = binder_get_node_refs_for_txn(
2783 target_node, &target_proc,
2784 &return_error);
2785 else
2786 return_error = BR_DEAD_REPLY;
2787 mutex_unlock(&context->context_mgr_node_lock);
2788 }
2789 if (!target_node) {
2790 /*
2791 * return_error is set above
2792 */
2793 return_error_param = -EINVAL;
2794 return_error_line = __LINE__;
2795 goto err_dead_binder;
2796 }
2797 e->to_node = target_node->debug_id;
2798 if (security_binder_transaction(proc->tsk,
2799 target_proc->tsk) < 0) {
2800 return_error = BR_FAILED_REPLY;
2801 return_error_param = -EPERM;
2802 return_error_line = __LINE__;
2803 goto err_invalid_target_handle;
2804 }
2805 binder_inner_proc_lock(proc);
2806 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2807 struct binder_transaction *tmp;
2808
2809 tmp = thread->transaction_stack;
2810 if (tmp->to_thread != thread) {
2811 spin_lock(&tmp->lock);
2812 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2813 proc->pid, thread->pid, tmp->debug_id,
2814 tmp->to_proc ? tmp->to_proc->pid : 0,
2815 tmp->to_thread ?
2816 tmp->to_thread->pid : 0);
2817 spin_unlock(&tmp->lock);
2818 binder_inner_proc_unlock(proc);
2819 return_error = BR_FAILED_REPLY;
2820 return_error_param = -EPROTO;
2821 return_error_line = __LINE__;
2822 goto err_bad_call_stack;
2823 }
2824 while (tmp) {
2825 struct binder_thread *from;
2826
2827 spin_lock(&tmp->lock);
2828 from = tmp->from;
2829 if (from && from->proc == target_proc) {
2830 atomic_inc(&from->tmp_ref);
2831 target_thread = from;
2832 spin_unlock(&tmp->lock);
2833 break;
2834 }
2835 spin_unlock(&tmp->lock);
2836 tmp = tmp->from_parent;
2837 }
2838 }
2839 binder_inner_proc_unlock(proc);
2840 }
2841 if (target_thread)
2842 e->to_thread = target_thread->pid;
2843 e->to_proc = target_proc->pid;
2844
2845 /* TODO: reuse incoming transaction for reply */
2846 t = kzalloc(sizeof(*t), GFP_KERNEL);
2847 if (t == NULL) {
2848 return_error = BR_FAILED_REPLY;
2849 return_error_param = -ENOMEM;
2850 return_error_line = __LINE__;
2851 goto err_alloc_t_failed;
2852 }
2853 binder_stats_created(BINDER_STAT_TRANSACTION);
2854 spin_lock_init(&t->lock);
2855
2856 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2857 if (tcomplete == NULL) {
2858 return_error = BR_FAILED_REPLY;
2859 return_error_param = -ENOMEM;
2860 return_error_line = __LINE__;
2861 goto err_alloc_tcomplete_failed;
2862 }
2863 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2864
2865 t->debug_id = t_debug_id;
2866
2867 if (reply)
2868 binder_debug(BINDER_DEBUG_TRANSACTION,
2869 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2870 proc->pid, thread->pid, t->debug_id,
2871 target_proc->pid, target_thread->pid,
2872 (u64)tr->data.ptr.buffer,
2873 (u64)tr->data.ptr.offsets,
2874 (u64)tr->data_size, (u64)tr->offsets_size,
2875 (u64)extra_buffers_size);
2876 else
2877 binder_debug(BINDER_DEBUG_TRANSACTION,
2878 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2879 proc->pid, thread->pid, t->debug_id,
2880 target_proc->pid, target_node->debug_id,
2881 (u64)tr->data.ptr.buffer,
2882 (u64)tr->data.ptr.offsets,
2883 (u64)tr->data_size, (u64)tr->offsets_size,
2884 (u64)extra_buffers_size);
2885
2886 if (!reply && !(tr->flags & TF_ONE_WAY))
2887 t->from = thread;
2888 else
2889 t->from = NULL;
2890 t->sender_euid = task_euid(proc->tsk);
2891 t->to_proc = target_proc;
2892 t->to_thread = target_thread;
2893 t->code = tr->code;
2894 t->flags = tr->flags;
2895 t->priority = task_nice(current);
2896
2897 trace_binder_transaction(reply, t, target_node);
2898
2899 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
2900 tr->offsets_size, extra_buffers_size,
2901 !reply && (t->flags & TF_ONE_WAY));
2902 if (IS_ERR(t->buffer)) {
2903 /*
2904 * -ESRCH indicates VMA cleared. The target is dying.
2905 */
2906 return_error_param = PTR_ERR(t->buffer);
2907 return_error = return_error_param == -ESRCH ?
2908 BR_DEAD_REPLY : BR_FAILED_REPLY;
2909 return_error_line = __LINE__;
2910 t->buffer = NULL;
2911 goto err_binder_alloc_buf_failed;
2912 }
2913 t->buffer->allow_user_free = 0;
2914 t->buffer->debug_id = t->debug_id;
2915 t->buffer->transaction = t;
2916 t->buffer->target_node = target_node;
2917 trace_binder_transaction_alloc_buf(t->buffer);
2918 off_start = (binder_size_t *)(t->buffer->data +
2919 ALIGN(tr->data_size, sizeof(void *)));
2920 offp = off_start;
2921
2922 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
2923 tr->data.ptr.buffer, tr->data_size)) {
2924 binder_user_error("%d:%d got transaction with invalid data ptr\n",
2925 proc->pid, thread->pid);
2926 return_error = BR_FAILED_REPLY;
2927 return_error_param = -EFAULT;
2928 return_error_line = __LINE__;
2929 goto err_copy_data_failed;
2930 }
2931 if (copy_from_user(offp, (const void __user *)(uintptr_t)
2932 tr->data.ptr.offsets, tr->offsets_size)) {
2933 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2934 proc->pid, thread->pid);
2935 return_error = BR_FAILED_REPLY;
2936 return_error_param = -EFAULT;
2937 return_error_line = __LINE__;
2938 goto err_copy_data_failed;
2939 }
2940 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
2941 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2942 proc->pid, thread->pid, (u64)tr->offsets_size);
2943 return_error = BR_FAILED_REPLY;
2944 return_error_param = -EINVAL;
2945 return_error_line = __LINE__;
2946 goto err_bad_offset;
2947 }
2948 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
2949 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
2950 proc->pid, thread->pid,
2951 (u64)extra_buffers_size);
2952 return_error = BR_FAILED_REPLY;
2953 return_error_param = -EINVAL;
2954 return_error_line = __LINE__;
2955 goto err_bad_offset;
2956 }
2957 off_end = (void *)off_start + tr->offsets_size;
2958 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
2959 sg_buf_end = sg_bufp + extra_buffers_size;
2960 off_min = 0;
2961 for (; offp < off_end; offp++) {
2962 struct binder_object_header *hdr;
2963 size_t object_size = binder_validate_object(t->buffer, *offp);
2964
2965 if (object_size == 0 || *offp < off_min) {
2966 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
2967 proc->pid, thread->pid, (u64)*offp,
2968 (u64)off_min,
2969 (u64)t->buffer->data_size);
2970 return_error = BR_FAILED_REPLY;
2971 return_error_param = -EINVAL;
2972 return_error_line = __LINE__;
2973 goto err_bad_offset;
2974 }
2975
2976 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
2977 off_min = *offp + object_size;
2978 switch (hdr->type) {
2979 case BINDER_TYPE_BINDER:
2980 case BINDER_TYPE_WEAK_BINDER: {
2981 struct flat_binder_object *fp;
2982
2983 fp = to_flat_binder_object(hdr);
2984 ret = binder_translate_binder(fp, t, thread);
2985 if (ret < 0) {
2986 return_error = BR_FAILED_REPLY;
2987 return_error_param = ret;
2988 return_error_line = __LINE__;
2989 goto err_translate_failed;
2990 }
2991 } break;
2992 case BINDER_TYPE_HANDLE:
2993 case BINDER_TYPE_WEAK_HANDLE: {
2994 struct flat_binder_object *fp;
2995
2996 fp = to_flat_binder_object(hdr);
2997 ret = binder_translate_handle(fp, t, thread);
2998 if (ret < 0) {
2999 return_error = BR_FAILED_REPLY;
3000 return_error_param = ret;
3001 return_error_line = __LINE__;
3002 goto err_translate_failed;
3003 }
3004 } break;
3005
3006 case BINDER_TYPE_FD: {
3007 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3008 int target_fd = binder_translate_fd(fp->fd, t, thread,
3009 in_reply_to);
3010
3011 if (target_fd < 0) {
3012 return_error = BR_FAILED_REPLY;
3013 return_error_param = target_fd;
3014 return_error_line = __LINE__;
3015 goto err_translate_failed;
3016 }
3017 fp->pad_binder = 0;
3018 fp->fd = target_fd;
3019 } break;
3020 case BINDER_TYPE_FDA: {
3021 struct binder_fd_array_object *fda =
3022 to_binder_fd_array_object(hdr);
3023 struct binder_buffer_object *parent =
3024 binder_validate_ptr(t->buffer, fda->parent,
3025 off_start,
3026 offp - off_start);
3027 if (!parent) {
3028 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3029 proc->pid, thread->pid);
3030 return_error = BR_FAILED_REPLY;
3031 return_error_param = -EINVAL;
3032 return_error_line = __LINE__;
3033 goto err_bad_parent;
3034 }
3035 if (!binder_validate_fixup(t->buffer, off_start,
3036 parent, fda->parent_offset,
3037 last_fixup_obj,
3038 last_fixup_min_off)) {
3039 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3040 proc->pid, thread->pid);
3041 return_error = BR_FAILED_REPLY;
3042 return_error_param = -EINVAL;
3043 return_error_line = __LINE__;
3044 goto err_bad_parent;
3045 }
3046 ret = binder_translate_fd_array(fda, parent, t, thread,
3047 in_reply_to);
3048 if (ret < 0) {
3049 return_error = BR_FAILED_REPLY;
3050 return_error_param = ret;
3051 return_error_line = __LINE__;
3052 goto err_translate_failed;
3053 }
3054 last_fixup_obj = parent;
3055 last_fixup_min_off =
3056 fda->parent_offset + sizeof(u32) * fda->num_fds;
3057 } break;
3058 case BINDER_TYPE_PTR: {
3059 struct binder_buffer_object *bp =
3060 to_binder_buffer_object(hdr);
3061 size_t buf_left = sg_buf_end - sg_bufp;
3062
3063 if (bp->length > buf_left) {
3064 binder_user_error("%d:%d got transaction with too large buffer\n",
3065 proc->pid, thread->pid);
3066 return_error = BR_FAILED_REPLY;
3067 return_error_param = -EINVAL;
3068 return_error_line = __LINE__;
3069 goto err_bad_offset;
3070 }
3071 if (copy_from_user(sg_bufp,
3072 (const void __user *)(uintptr_t)
3073 bp->buffer, bp->length)) {
3074 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3075 proc->pid, thread->pid);
3076 return_error_param = -EFAULT;
3077 return_error = BR_FAILED_REPLY;
3078 return_error_line = __LINE__;
3079 goto err_copy_data_failed;
3080 }
3081 /* Fixup buffer pointer to target proc address space */
3082 bp->buffer = (uintptr_t)sg_bufp +
3083 binder_alloc_get_user_buffer_offset(
3084 &target_proc->alloc);
3085 sg_bufp += ALIGN(bp->length, sizeof(u64));
3086
3087 ret = binder_fixup_parent(t, thread, bp, off_start,
3088 offp - off_start,
3089 last_fixup_obj,
3090 last_fixup_min_off);
3091 if (ret < 0) {
3092 return_error = BR_FAILED_REPLY;
3093 return_error_param = ret;
3094 return_error_line = __LINE__;
3095 goto err_translate_failed;
3096 }
3097 last_fixup_obj = bp;
3098 last_fixup_min_off = 0;
3099 } break;
3100 default:
3101 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3102 proc->pid, thread->pid, hdr->type);
3103 return_error = BR_FAILED_REPLY;
3104 return_error_param = -EINVAL;
3105 return_error_line = __LINE__;
3106 goto err_bad_object_type;
3107 }
3108 }
3109 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3110 binder_enqueue_work(proc, tcomplete, &thread->todo);
3111 t->work.type = BINDER_WORK_TRANSACTION;
3112
3113 if (reply) {
3114 binder_inner_proc_lock(target_proc);
3115 if (target_thread->is_dead) {
3116 binder_inner_proc_unlock(target_proc);
3117 goto err_dead_proc_or_thread;
3118 }
3119 BUG_ON(t->buffer->async_transaction != 0);
3120 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3121 binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
3122 binder_inner_proc_unlock(target_proc);
3123 wake_up_interruptible_sync(&target_thread->wait);
3124 binder_free_transaction(in_reply_to);
3125 } else if (!(t->flags & TF_ONE_WAY)) {
3126 BUG_ON(t->buffer->async_transaction != 0);
3127 binder_inner_proc_lock(proc);
3128 t->need_reply = 1;
3129 t->from_parent = thread->transaction_stack;
3130 thread->transaction_stack = t;
3131 binder_inner_proc_unlock(proc);
3132 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3133 binder_inner_proc_lock(proc);
3134 binder_pop_transaction_ilocked(thread, t);
3135 binder_inner_proc_unlock(proc);
3136 goto err_dead_proc_or_thread;
3137 }
3138 } else {
3139 BUG_ON(target_node == NULL);
3140 BUG_ON(t->buffer->async_transaction != 1);
3141 if (!binder_proc_transaction(t, target_proc, NULL))
3142 goto err_dead_proc_or_thread;
3143 }
3144 if (target_thread)
3145 binder_thread_dec_tmpref(target_thread);
3146 binder_proc_dec_tmpref(target_proc);
3147 if (target_node)
3148 binder_dec_node_tmpref(target_node);
3149 /*
3150 * write barrier to synchronize with initialization
3151 * of log entry
3152 */
3153 smp_wmb();
3154 WRITE_ONCE(e->debug_id_done, t_debug_id);
3155 return;
3156
3157 err_dead_proc_or_thread:
3158 return_error = BR_DEAD_REPLY;
3159 return_error_line = __LINE__;
3160 binder_dequeue_work(proc, tcomplete);
3161 err_translate_failed:
3162 err_bad_object_type:
3163 err_bad_offset:
3164 err_bad_parent:
3165 err_copy_data_failed:
3166 trace_binder_transaction_failed_buffer_release(t->buffer);
3167 binder_transaction_buffer_release(target_proc, t->buffer, offp);
3168 if (target_node)
3169 binder_dec_node_tmpref(target_node);
3170 target_node = NULL;
3171 t->buffer->transaction = NULL;
3172 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3173 err_binder_alloc_buf_failed:
3174 kfree(tcomplete);
3175 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3176 err_alloc_tcomplete_failed:
3177 kfree(t);
3178 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3179 err_alloc_t_failed:
3180 err_bad_call_stack:
3181 err_empty_call_stack:
3182 err_dead_binder:
3183 err_invalid_target_handle:
3184 if (target_thread)
3185 binder_thread_dec_tmpref(target_thread);
3186 if (target_proc)
3187 binder_proc_dec_tmpref(target_proc);
3188 if (target_node) {
3189 binder_dec_node(target_node, 1, 0);
3190 binder_dec_node_tmpref(target_node);
3191 }
3192
3193 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3194 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3195 proc->pid, thread->pid, return_error, return_error_param,
3196 (u64)tr->data_size, (u64)tr->offsets_size,
3197 return_error_line);
3198
3199 {
3200 struct binder_transaction_log_entry *fe;
3201
3202 e->return_error = return_error;
3203 e->return_error_param = return_error_param;
3204 e->return_error_line = return_error_line;
3205 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3206 *fe = *e;
3207 /*
3208 * write barrier to synchronize with initialization
3209 * of log entry
3210 */
3211 smp_wmb();
3212 WRITE_ONCE(e->debug_id_done, t_debug_id);
3213 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3214 }
3215
3216 BUG_ON(thread->return_error.cmd != BR_OK);
3217 if (in_reply_to) {
3218 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3219 binder_enqueue_work(thread->proc,
3220 &thread->return_error.work,
3221 &thread->todo);
3222 binder_send_failed_reply(in_reply_to, return_error);
3223 } else {
3224 thread->return_error.cmd = return_error;
3225 binder_enqueue_work(thread->proc,
3226 &thread->return_error.work,
3227 &thread->todo);
3228 }
3229 }
3230
3231 static int binder_thread_write(struct binder_proc *proc,
3232 struct binder_thread *thread,
3233 binder_uintptr_t binder_buffer, size_t size,
3234 binder_size_t *consumed)
3235 {
3236 uint32_t cmd;
3237 struct binder_context *context = proc->context;
3238 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3239 void __user *ptr = buffer + *consumed;
3240 void __user *end = buffer + size;
3241
3242 while (ptr < end && thread->return_error.cmd == BR_OK) {
3243 int ret;
3244
3245 if (get_user(cmd, (uint32_t __user *)ptr))
3246 return -EFAULT;
3247 ptr += sizeof(uint32_t);
3248 trace_binder_command(cmd);
3249 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3250 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3251 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3252 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3253 }
3254 switch (cmd) {
3255 case BC_INCREFS:
3256 case BC_ACQUIRE:
3257 case BC_RELEASE:
3258 case BC_DECREFS: {
3259 uint32_t target;
3260 const char *debug_string;
3261 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3262 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3263 struct binder_ref_data rdata;
3264
3265 if (get_user(target, (uint32_t __user *)ptr))
3266 return -EFAULT;
3267
3268 ptr += sizeof(uint32_t);
3269 ret = -1;
3270 if (increment && !target) {
3271 struct binder_node *ctx_mgr_node;
3272 mutex_lock(&context->context_mgr_node_lock);
3273 ctx_mgr_node = context->binder_context_mgr_node;
3274 if (ctx_mgr_node)
3275 ret = binder_inc_ref_for_node(
3276 proc, ctx_mgr_node,
3277 strong, NULL, &rdata);
3278 mutex_unlock(&context->context_mgr_node_lock);
3279 }
3280 if (ret)
3281 ret = binder_update_ref_for_handle(
3282 proc, target, increment, strong,
3283 &rdata);
3284 if (!ret && rdata.desc != target) {
3285 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3286 proc->pid, thread->pid,
3287 target, rdata.desc);
3288 }
3289 switch (cmd) {
3290 case BC_INCREFS:
3291 debug_string = "IncRefs";
3292 break;
3293 case BC_ACQUIRE:
3294 debug_string = "Acquire";
3295 break;
3296 case BC_RELEASE:
3297 debug_string = "Release";
3298 break;
3299 case BC_DECREFS:
3300 default:
3301 debug_string = "DecRefs";
3302 break;
3303 }
3304 if (ret) {
3305 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3306 proc->pid, thread->pid, debug_string,
3307 strong, target, ret);
3308 break;
3309 }
3310 binder_debug(BINDER_DEBUG_USER_REFS,
3311 "%d:%d %s ref %d desc %d s %d w %d\n",
3312 proc->pid, thread->pid, debug_string,
3313 rdata.debug_id, rdata.desc, rdata.strong,
3314 rdata.weak);
3315 break;
3316 }
3317 case BC_INCREFS_DONE:
3318 case BC_ACQUIRE_DONE: {
3319 binder_uintptr_t node_ptr;
3320 binder_uintptr_t cookie;
3321 struct binder_node *node;
3322 bool free_node;
3323
3324 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3325 return -EFAULT;
3326 ptr += sizeof(binder_uintptr_t);
3327 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3328 return -EFAULT;
3329 ptr += sizeof(binder_uintptr_t);
3330 node = binder_get_node(proc, node_ptr);
3331 if (node == NULL) {
3332 binder_user_error("%d:%d %s u%016llx no match\n",
3333 proc->pid, thread->pid,
3334 cmd == BC_INCREFS_DONE ?
3335 "BC_INCREFS_DONE" :
3336 "BC_ACQUIRE_DONE",
3337 (u64)node_ptr);
3338 break;
3339 }
3340 if (cookie != node->cookie) {
3341 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3342 proc->pid, thread->pid,
3343 cmd == BC_INCREFS_DONE ?
3344 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3345 (u64)node_ptr, node->debug_id,
3346 (u64)cookie, (u64)node->cookie);
3347 binder_put_node(node);
3348 break;
3349 }
3350 binder_node_inner_lock(node);
3351 if (cmd == BC_ACQUIRE_DONE) {
3352 if (node->pending_strong_ref == 0) {
3353 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3354 proc->pid, thread->pid,
3355 node->debug_id);
3356 binder_node_inner_unlock(node);
3357 binder_put_node(node);
3358 break;
3359 }
3360 node->pending_strong_ref = 0;
3361 } else {
3362 if (node->pending_weak_ref == 0) {
3363 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3364 proc->pid, thread->pid,
3365 node->debug_id);
3366 binder_node_inner_unlock(node);
3367 binder_put_node(node);
3368 break;
3369 }
3370 node->pending_weak_ref = 0;
3371 }
3372 free_node = binder_dec_node_nilocked(node,
3373 cmd == BC_ACQUIRE_DONE, 0);
3374 WARN_ON(free_node);
3375 binder_debug(BINDER_DEBUG_USER_REFS,
3376 "%d:%d %s node %d ls %d lw %d tr %d\n",
3377 proc->pid, thread->pid,
3378 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3379 node->debug_id, node->local_strong_refs,
3380 node->local_weak_refs, node->tmp_refs);
3381 binder_node_inner_unlock(node);
3382 binder_put_node(node);
3383 break;
3384 }
3385 case BC_ATTEMPT_ACQUIRE:
3386 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3387 return -EINVAL;
3388 case BC_ACQUIRE_RESULT:
3389 pr_err("BC_ACQUIRE_RESULT not supported\n");
3390 return -EINVAL;
3391
3392 case BC_FREE_BUFFER: {
3393 binder_uintptr_t data_ptr;
3394 struct binder_buffer *buffer;
3395
3396 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3397 return -EFAULT;
3398 ptr += sizeof(binder_uintptr_t);
3399
3400 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3401 data_ptr);
3402 if (buffer == NULL) {
3403 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
3404 proc->pid, thread->pid, (u64)data_ptr);
3405 break;
3406 }
3407 if (!buffer->allow_user_free) {
3408 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
3409 proc->pid, thread->pid, (u64)data_ptr);
3410 break;
3411 }
3412 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3413 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3414 proc->pid, thread->pid, (u64)data_ptr,
3415 buffer->debug_id,
3416 buffer->transaction ? "active" : "finished");
3417
3418 if (buffer->transaction) {
3419 buffer->transaction->buffer = NULL;
3420 buffer->transaction = NULL;
3421 }
3422 if (buffer->async_transaction && buffer->target_node) {
3423 struct binder_node *buf_node;
3424 struct binder_work *w;
3425
3426 buf_node = buffer->target_node;
3427 binder_node_inner_lock(buf_node);
3428 BUG_ON(!buf_node->has_async_transaction);
3429 BUG_ON(buf_node->proc != proc);
3430 w = binder_dequeue_work_head_ilocked(
3431 &buf_node->async_todo);
3432 if (!w) {
3433 buf_node->has_async_transaction = 0;
3434 } else {
3435 binder_enqueue_work_ilocked(
3436 w, &proc->todo);
3437 binder_wakeup_proc_ilocked(proc);
3438 }
3439 binder_node_inner_unlock(buf_node);
3440 }
3441 trace_binder_transaction_buffer_release(buffer);
3442 binder_transaction_buffer_release(proc, buffer, NULL);
3443 binder_alloc_free_buf(&proc->alloc, buffer);
3444 break;
3445 }
3446
3447 case BC_TRANSACTION_SG:
3448 case BC_REPLY_SG: {
3449 struct binder_transaction_data_sg tr;
3450
3451 if (copy_from_user(&tr, ptr, sizeof(tr)))
3452 return -EFAULT;
3453 ptr += sizeof(tr);
3454 binder_transaction(proc, thread, &tr.transaction_data,
3455 cmd == BC_REPLY_SG, tr.buffers_size);
3456 break;
3457 }
3458 case BC_TRANSACTION:
3459 case BC_REPLY: {
3460 struct binder_transaction_data tr;
3461
3462 if (copy_from_user(&tr, ptr, sizeof(tr)))
3463 return -EFAULT;
3464 ptr += sizeof(tr);
3465 binder_transaction(proc, thread, &tr,
3466 cmd == BC_REPLY, 0);
3467 break;
3468 }
3469
3470 case BC_REGISTER_LOOPER:
3471 binder_debug(BINDER_DEBUG_THREADS,
3472 "%d:%d BC_REGISTER_LOOPER\n",
3473 proc->pid, thread->pid);
3474 binder_inner_proc_lock(proc);
3475 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3476 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3477 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3478 proc->pid, thread->pid);
3479 } else if (proc->requested_threads == 0) {
3480 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3481 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3482 proc->pid, thread->pid);
3483 } else {
3484 proc->requested_threads--;
3485 proc->requested_threads_started++;
3486 }
3487 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3488 binder_inner_proc_unlock(proc);
3489 break;
3490 case BC_ENTER_LOOPER:
3491 binder_debug(BINDER_DEBUG_THREADS,
3492 "%d:%d BC_ENTER_LOOPER\n",
3493 proc->pid, thread->pid);
3494 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3495 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3496 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3497 proc->pid, thread->pid);
3498 }
3499 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3500 break;
3501 case BC_EXIT_LOOPER:
3502 binder_debug(BINDER_DEBUG_THREADS,
3503 "%d:%d BC_EXIT_LOOPER\n",
3504 proc->pid, thread->pid);
3505 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3506 break;
3507
3508 case BC_REQUEST_DEATH_NOTIFICATION:
3509 case BC_CLEAR_DEATH_NOTIFICATION: {
3510 uint32_t target;
3511 binder_uintptr_t cookie;
3512 struct binder_ref *ref;
3513 struct binder_ref_death *death = NULL;
3514
3515 if (get_user(target, (uint32_t __user *)ptr))
3516 return -EFAULT;
3517 ptr += sizeof(uint32_t);
3518 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3519 return -EFAULT;
3520 ptr += sizeof(binder_uintptr_t);
3521 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3522 /*
3523 * Allocate memory for death notification
3524 * before taking lock
3525 */
3526 death = kzalloc(sizeof(*death), GFP_KERNEL);
3527 if (death == NULL) {
3528 WARN_ON(thread->return_error.cmd !=
3529 BR_OK);
3530 thread->return_error.cmd = BR_ERROR;
3531 binder_enqueue_work(
3532 thread->proc,
3533 &thread->return_error.work,
3534 &thread->todo);
3535 binder_debug(
3536 BINDER_DEBUG_FAILED_TRANSACTION,
3537 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3538 proc->pid, thread->pid);
3539 break;
3540 }
3541 }
3542 binder_proc_lock(proc);
3543 ref = binder_get_ref_olocked(proc, target, false);
3544 if (ref == NULL) {
3545 binder_user_error("%d:%d %s invalid ref %d\n",
3546 proc->pid, thread->pid,
3547 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3548 "BC_REQUEST_DEATH_NOTIFICATION" :
3549 "BC_CLEAR_DEATH_NOTIFICATION",
3550 target);
3551 binder_proc_unlock(proc);
3552 kfree(death);
3553 break;
3554 }
3555
3556 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3557 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3558 proc->pid, thread->pid,
3559 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3560 "BC_REQUEST_DEATH_NOTIFICATION" :
3561 "BC_CLEAR_DEATH_NOTIFICATION",
3562 (u64)cookie, ref->data.debug_id,
3563 ref->data.desc, ref->data.strong,
3564 ref->data.weak, ref->node->debug_id);
3565
3566 binder_node_lock(ref->node);
3567 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3568 if (ref->death) {
3569 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3570 proc->pid, thread->pid);
3571 binder_node_unlock(ref->node);
3572 binder_proc_unlock(proc);
3573 kfree(death);
3574 break;
3575 }
3576 binder_stats_created(BINDER_STAT_DEATH);
3577 INIT_LIST_HEAD(&death->work.entry);
3578 death->cookie = cookie;
3579 ref->death = death;
3580 if (ref->node->proc == NULL) {
3581 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3582
3583 binder_inner_proc_lock(proc);
3584 binder_enqueue_work_ilocked(
3585 &ref->death->work, &proc->todo);
3586 binder_wakeup_proc_ilocked(proc);
3587 binder_inner_proc_unlock(proc);
3588 }
3589 } else {
3590 if (ref->death == NULL) {
3591 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3592 proc->pid, thread->pid);
3593 binder_node_unlock(ref->node);
3594 binder_proc_unlock(proc);
3595 break;
3596 }
3597 death = ref->death;
3598 if (death->cookie != cookie) {
3599 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3600 proc->pid, thread->pid,
3601 (u64)death->cookie,
3602 (u64)cookie);
3603 binder_node_unlock(ref->node);
3604 binder_proc_unlock(proc);
3605 break;
3606 }
3607 ref->death = NULL;
3608 binder_inner_proc_lock(proc);
3609 if (list_empty(&death->work.entry)) {
3610 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3611 if (thread->looper &
3612 (BINDER_LOOPER_STATE_REGISTERED |
3613 BINDER_LOOPER_STATE_ENTERED))
3614 binder_enqueue_work_ilocked(
3615 &death->work,
3616 &thread->todo);
3617 else {
3618 binder_enqueue_work_ilocked(
3619 &death->work,
3620 &proc->todo);
3621 binder_wakeup_proc_ilocked(
3622 proc);
3623 }
3624 } else {
3625 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3626 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3627 }
3628 binder_inner_proc_unlock(proc);
3629 }
3630 binder_node_unlock(ref->node);
3631 binder_proc_unlock(proc);
3632 } break;
3633 case BC_DEAD_BINDER_DONE: {
3634 struct binder_work *w;
3635 binder_uintptr_t cookie;
3636 struct binder_ref_death *death = NULL;
3637
3638 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3639 return -EFAULT;
3640
3641 ptr += sizeof(cookie);
3642 binder_inner_proc_lock(proc);
3643 list_for_each_entry(w, &proc->delivered_death,
3644 entry) {
3645 struct binder_ref_death *tmp_death =
3646 container_of(w,
3647 struct binder_ref_death,
3648 work);
3649
3650 if (tmp_death->cookie == cookie) {
3651 death = tmp_death;
3652 break;
3653 }
3654 }
3655 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3656 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
3657 proc->pid, thread->pid, (u64)cookie,
3658 death);
3659 if (death == NULL) {
3660 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3661 proc->pid, thread->pid, (u64)cookie);
3662 binder_inner_proc_unlock(proc);
3663 break;
3664 }
3665 binder_dequeue_work_ilocked(&death->work);
3666 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3667 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3668 if (thread->looper &
3669 (BINDER_LOOPER_STATE_REGISTERED |
3670 BINDER_LOOPER_STATE_ENTERED))
3671 binder_enqueue_work_ilocked(
3672 &death->work, &thread->todo);
3673 else {
3674 binder_enqueue_work_ilocked(
3675 &death->work,
3676 &proc->todo);
3677 binder_wakeup_proc_ilocked(proc);
3678 }
3679 }
3680 binder_inner_proc_unlock(proc);
3681 } break;
3682
3683 default:
3684 pr_err("%d:%d unknown command %d\n",
3685 proc->pid, thread->pid, cmd);
3686 return -EINVAL;
3687 }
3688 *consumed = ptr - buffer;
3689 }
3690 return 0;
3691 }
3692
3693 static void binder_stat_br(struct binder_proc *proc,
3694 struct binder_thread *thread, uint32_t cmd)
3695 {
3696 trace_binder_return(cmd);
3697 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3698 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3699 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3700 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3701 }
3702 }
3703
3704 static int binder_put_node_cmd(struct binder_proc *proc,
3705 struct binder_thread *thread,
3706 void __user **ptrp,
3707 binder_uintptr_t node_ptr,
3708 binder_uintptr_t node_cookie,
3709 int node_debug_id,
3710 uint32_t cmd, const char *cmd_name)
3711 {
3712 void __user *ptr = *ptrp;
3713
3714 if (put_user(cmd, (uint32_t __user *)ptr))
3715 return -EFAULT;
3716 ptr += sizeof(uint32_t);
3717
3718 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3719 return -EFAULT;
3720 ptr += sizeof(binder_uintptr_t);
3721
3722 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3723 return -EFAULT;
3724 ptr += sizeof(binder_uintptr_t);
3725
3726 binder_stat_br(proc, thread, cmd);
3727 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3728 proc->pid, thread->pid, cmd_name, node_debug_id,
3729 (u64)node_ptr, (u64)node_cookie);
3730
3731 *ptrp = ptr;
3732 return 0;
3733 }
3734
3735 static int binder_wait_for_work(struct binder_thread *thread,
3736 bool do_proc_work)
3737 {
3738 DEFINE_WAIT(wait);
3739 struct binder_proc *proc = thread->proc;
3740 int ret = 0;
3741
3742 freezer_do_not_count();
3743 binder_inner_proc_lock(proc);
3744 for (;;) {
3745 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3746 if (binder_has_work_ilocked(thread, do_proc_work))
3747 break;
3748 if (do_proc_work)
3749 list_add(&thread->waiting_thread_node,
3750 &proc->waiting_threads);
3751 binder_inner_proc_unlock(proc);
3752 schedule();
3753 binder_inner_proc_lock(proc);
3754 list_del_init(&thread->waiting_thread_node);
3755 if (signal_pending(current)) {
3756 ret = -ERESTARTSYS;
3757 break;
3758 }
3759 }
3760 finish_wait(&thread->wait, &wait);
3761 binder_inner_proc_unlock(proc);
3762 freezer_count();
3763
3764 return ret;
3765 }
3766
3767 static int binder_thread_read(struct binder_proc *proc,
3768 struct binder_thread *thread,
3769 binder_uintptr_t binder_buffer, size_t size,
3770 binder_size_t *consumed, int non_block)
3771 {
3772 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3773 void __user *ptr = buffer + *consumed;
3774 void __user *end = buffer + size;
3775
3776 int ret = 0;
3777 int wait_for_proc_work;
3778
3779 if (*consumed == 0) {
3780 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3781 return -EFAULT;
3782 ptr += sizeof(uint32_t);
3783 }
3784
3785 retry:
3786 binder_inner_proc_lock(proc);
3787 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
3788 binder_inner_proc_unlock(proc);
3789
3790 thread->looper |= BINDER_LOOPER_STATE_WAITING;
3791
3792 trace_binder_wait_for_work(wait_for_proc_work,
3793 !!thread->transaction_stack,
3794 !binder_worklist_empty(proc, &thread->todo));
3795 if (wait_for_proc_work) {
3796 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3797 BINDER_LOOPER_STATE_ENTERED))) {
3798 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
3799 proc->pid, thread->pid, thread->looper);
3800 wait_event_interruptible(binder_user_error_wait,
3801 binder_stop_on_user_error < 2);
3802 }
3803 binder_set_nice(proc->default_priority);
3804 }
3805
3806 if (non_block) {
3807 if (!binder_has_work(thread, wait_for_proc_work))
3808 ret = -EAGAIN;
3809 } else {
3810 ret = binder_wait_for_work(thread, wait_for_proc_work);
3811 }
3812
3813 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3814
3815 if (ret)
3816 return ret;
3817
3818 while (1) {
3819 uint32_t cmd;
3820 struct binder_transaction_data tr;
3821 struct binder_work *w = NULL;
3822 struct list_head *list = NULL;
3823 struct binder_transaction *t = NULL;
3824 struct binder_thread *t_from;
3825
3826 binder_inner_proc_lock(proc);
3827 if (!binder_worklist_empty_ilocked(&thread->todo))
3828 list = &thread->todo;
3829 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
3830 wait_for_proc_work)
3831 list = &proc->todo;
3832 else {
3833 binder_inner_proc_unlock(proc);
3834
3835 /* no data added */
3836 if (ptr - buffer == 4 && !thread->looper_need_return)
3837 goto retry;
3838 break;
3839 }
3840
3841 if (end - ptr < sizeof(tr) + 4) {
3842 binder_inner_proc_unlock(proc);
3843 break;
3844 }
3845 w = binder_dequeue_work_head_ilocked(list);
3846
3847 switch (w->type) {
3848 case BINDER_WORK_TRANSACTION: {
3849 binder_inner_proc_unlock(proc);
3850 t = container_of(w, struct binder_transaction, work);
3851 } break;
3852 case BINDER_WORK_RETURN_ERROR: {
3853 struct binder_error *e = container_of(
3854 w, struct binder_error, work);
3855
3856 WARN_ON(e->cmd == BR_OK);
3857 binder_inner_proc_unlock(proc);
3858 if (put_user(e->cmd, (uint32_t __user *)ptr))
3859 return -EFAULT;
3860 e->cmd = BR_OK;
3861 ptr += sizeof(uint32_t);
3862
3863 binder_stat_br(proc, thread, e->cmd);
3864 } break;
3865 case BINDER_WORK_TRANSACTION_COMPLETE: {
3866 binder_inner_proc_unlock(proc);
3867 cmd = BR_TRANSACTION_COMPLETE;
3868 if (put_user(cmd, (uint32_t __user *)ptr))
3869 return -EFAULT;
3870 ptr += sizeof(uint32_t);
3871
3872 binder_stat_br(proc, thread, cmd);
3873 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
3874 "%d:%d BR_TRANSACTION_COMPLETE\n",
3875 proc->pid, thread->pid);
3876 kfree(w);
3877 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3878 } break;
3879 case BINDER_WORK_NODE: {
3880 struct binder_node *node = container_of(w, struct binder_node, work);
3881 int strong, weak;
3882 binder_uintptr_t node_ptr = node->ptr;
3883 binder_uintptr_t node_cookie = node->cookie;
3884 int node_debug_id = node->debug_id;
3885 int has_weak_ref;
3886 int has_strong_ref;
3887 void __user *orig_ptr = ptr;
3888
3889 BUG_ON(proc != node->proc);
3890 strong = node->internal_strong_refs ||
3891 node->local_strong_refs;
3892 weak = !hlist_empty(&node->refs) ||
3893 node->local_weak_refs ||
3894 node->tmp_refs || strong;
3895 has_strong_ref = node->has_strong_ref;
3896 has_weak_ref = node->has_weak_ref;
3897
3898 if (weak && !has_weak_ref) {
3899 node->has_weak_ref = 1;
3900 node->pending_weak_ref = 1;
3901 node->local_weak_refs++;
3902 }
3903 if (strong && !has_strong_ref) {
3904 node->has_strong_ref = 1;
3905 node->pending_strong_ref = 1;
3906 node->local_strong_refs++;
3907 }
3908 if (!strong && has_strong_ref)
3909 node->has_strong_ref = 0;
3910 if (!weak && has_weak_ref)
3911 node->has_weak_ref = 0;
3912 if (!weak && !strong) {
3913 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3914 "%d:%d node %d u%016llx c%016llx deleted\n",
3915 proc->pid, thread->pid,
3916 node_debug_id,
3917 (u64)node_ptr,
3918 (u64)node_cookie);
3919 rb_erase(&node->rb_node, &proc->nodes);
3920 binder_inner_proc_unlock(proc);
3921 binder_node_lock(node);
3922 /*
3923 * Acquire the node lock before freeing the
3924 * node to serialize with other threads that
3925 * may have been holding the node lock while
3926 * decrementing this node (avoids race where
3927 * this thread frees while the other thread
3928 * is unlocking the node after the final
3929 * decrement)
3930 */
3931 binder_node_unlock(node);
3932 binder_free_node(node);
3933 } else
3934 binder_inner_proc_unlock(proc);
3935
3936 if (weak && !has_weak_ref)
3937 ret = binder_put_node_cmd(
3938 proc, thread, &ptr, node_ptr,
3939 node_cookie, node_debug_id,
3940 BR_INCREFS, "BR_INCREFS");
3941 if (!ret && strong && !has_strong_ref)
3942 ret = binder_put_node_cmd(
3943 proc, thread, &ptr, node_ptr,
3944 node_cookie, node_debug_id,
3945 BR_ACQUIRE, "BR_ACQUIRE");
3946 if (!ret && !strong && has_strong_ref)
3947 ret = binder_put_node_cmd(
3948 proc, thread, &ptr, node_ptr,
3949 node_cookie, node_debug_id,
3950 BR_RELEASE, "BR_RELEASE");
3951 if (!ret && !weak && has_weak_ref)
3952 ret = binder_put_node_cmd(
3953 proc, thread, &ptr, node_ptr,
3954 node_cookie, node_debug_id,
3955 BR_DECREFS, "BR_DECREFS");
3956 if (orig_ptr == ptr)
3957 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3958 "%d:%d node %d u%016llx c%016llx state unchanged\n",
3959 proc->pid, thread->pid,
3960 node_debug_id,
3961 (u64)node_ptr,
3962 (u64)node_cookie);
3963 if (ret)
3964 return ret;
3965 } break;
3966 case BINDER_WORK_DEAD_BINDER:
3967 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3968 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
3969 struct binder_ref_death *death;
3970 uint32_t cmd;
3971 binder_uintptr_t cookie;
3972
3973 death = container_of(w, struct binder_ref_death, work);
3974 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
3975 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
3976 else
3977 cmd = BR_DEAD_BINDER;
3978 cookie = death->cookie;
3979
3980 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3981 "%d:%d %s %016llx\n",
3982 proc->pid, thread->pid,
3983 cmd == BR_DEAD_BINDER ?
3984 "BR_DEAD_BINDER" :
3985 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
3986 (u64)cookie);
3987 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
3988 binder_inner_proc_unlock(proc);
3989 kfree(death);
3990 binder_stats_deleted(BINDER_STAT_DEATH);
3991 } else {
3992 binder_enqueue_work_ilocked(
3993 w, &proc->delivered_death);
3994 binder_inner_proc_unlock(proc);
3995 }
3996 if (put_user(cmd, (uint32_t __user *)ptr))
3997 return -EFAULT;
3998 ptr += sizeof(uint32_t);
3999 if (put_user(cookie,
4000 (binder_uintptr_t __user *)ptr))
4001 return -EFAULT;
4002 ptr += sizeof(binder_uintptr_t);
4003 binder_stat_br(proc, thread, cmd);
4004 if (cmd == BR_DEAD_BINDER)
4005 goto done; /* DEAD_BINDER notifications can cause transactions */
4006 } break;
4007 }
4008
4009 if (!t)
4010 continue;
4011
4012 BUG_ON(t->buffer == NULL);
4013 if (t->buffer->target_node) {
4014 struct binder_node *target_node = t->buffer->target_node;
4015
4016 tr.target.ptr = target_node->ptr;
4017 tr.cookie = target_node->cookie;
4018 t->saved_priority = task_nice(current);
4019 if (t->priority < target_node->min_priority &&
4020 !(t->flags & TF_ONE_WAY))
4021 binder_set_nice(t->priority);
4022 else if (!(t->flags & TF_ONE_WAY) ||
4023 t->saved_priority > target_node->min_priority)
4024 binder_set_nice(target_node->min_priority);
4025 cmd = BR_TRANSACTION;
4026 } else {
4027 tr.target.ptr = 0;
4028 tr.cookie = 0;
4029 cmd = BR_REPLY;
4030 }
4031 tr.code = t->code;
4032 tr.flags = t->flags;
4033 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4034
4035 t_from = binder_get_txn_from(t);
4036 if (t_from) {
4037 struct task_struct *sender = t_from->proc->tsk;
4038
4039 tr.sender_pid = task_tgid_nr_ns(sender,
4040 task_active_pid_ns(current));
4041 } else {
4042 tr.sender_pid = 0;
4043 }
4044
4045 tr.data_size = t->buffer->data_size;
4046 tr.offsets_size = t->buffer->offsets_size;
4047 tr.data.ptr.buffer = (binder_uintptr_t)
4048 ((uintptr_t)t->buffer->data +
4049 binder_alloc_get_user_buffer_offset(&proc->alloc));
4050 tr.data.ptr.offsets = tr.data.ptr.buffer +
4051 ALIGN(t->buffer->data_size,
4052 sizeof(void *));
4053
4054 if (put_user(cmd, (uint32_t __user *)ptr)) {
4055 if (t_from)
4056 binder_thread_dec_tmpref(t_from);
4057
4058 binder_cleanup_transaction(t, "put_user failed",
4059 BR_FAILED_REPLY);
4060
4061 return -EFAULT;
4062 }
4063 ptr += sizeof(uint32_t);
4064 if (copy_to_user(ptr, &tr, sizeof(tr))) {
4065 if (t_from)
4066 binder_thread_dec_tmpref(t_from);
4067
4068 binder_cleanup_transaction(t, "copy_to_user failed",
4069 BR_FAILED_REPLY);
4070
4071 return -EFAULT;
4072 }
4073 ptr += sizeof(tr);
4074
4075 trace_binder_transaction_received(t);
4076 binder_stat_br(proc, thread, cmd);
4077 binder_debug(BINDER_DEBUG_TRANSACTION,
4078 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4079 proc->pid, thread->pid,
4080 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4081 "BR_REPLY",
4082 t->debug_id, t_from ? t_from->proc->pid : 0,
4083 t_from ? t_from->pid : 0, cmd,
4084 t->buffer->data_size, t->buffer->offsets_size,
4085 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
4086
4087 if (t_from)
4088 binder_thread_dec_tmpref(t_from);
4089 t->buffer->allow_user_free = 1;
4090 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
4091 binder_inner_proc_lock(thread->proc);
4092 t->to_parent = thread->transaction_stack;
4093 t->to_thread = thread;
4094 thread->transaction_stack = t;
4095 binder_inner_proc_unlock(thread->proc);
4096 } else {
4097 binder_free_transaction(t);
4098 }
4099 break;
4100 }
4101
4102 done:
4103
4104 *consumed = ptr - buffer;
4105 binder_inner_proc_lock(proc);
4106 if (proc->requested_threads == 0 &&
4107 list_empty(&thread->proc->waiting_threads) &&
4108 proc->requested_threads_started < proc->max_threads &&
4109 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4110 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
4111 /*spawn a new thread if we leave this out */) {
4112 proc->requested_threads++;
4113 binder_inner_proc_unlock(proc);
4114 binder_debug(BINDER_DEBUG_THREADS,
4115 "%d:%d BR_SPAWN_LOOPER\n",
4116 proc->pid, thread->pid);
4117 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4118 return -EFAULT;
4119 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4120 } else
4121 binder_inner_proc_unlock(proc);
4122 return 0;
4123 }
4124
4125 static void binder_release_work(struct binder_proc *proc,
4126 struct list_head *list)
4127 {
4128 struct binder_work *w;
4129
4130 while (1) {
4131 w = binder_dequeue_work_head(proc, list);
4132 if (!w)
4133 return;
4134
4135 switch (w->type) {
4136 case BINDER_WORK_TRANSACTION: {
4137 struct binder_transaction *t;
4138
4139 t = container_of(w, struct binder_transaction, work);
4140
4141 binder_cleanup_transaction(t, "process died.",
4142 BR_DEAD_REPLY);
4143 } break;
4144 case BINDER_WORK_RETURN_ERROR: {
4145 struct binder_error *e = container_of(
4146 w, struct binder_error, work);
4147
4148 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4149 "undelivered TRANSACTION_ERROR: %u\n",
4150 e->cmd);
4151 } break;
4152 case BINDER_WORK_TRANSACTION_COMPLETE: {
4153 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4154 "undelivered TRANSACTION_COMPLETE\n");
4155 kfree(w);
4156 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4157 } break;
4158 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4159 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4160 struct binder_ref_death *death;
4161
4162 death = container_of(w, struct binder_ref_death, work);
4163 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4164 "undelivered death notification, %016llx\n",
4165 (u64)death->cookie);
4166 kfree(death);
4167 binder_stats_deleted(BINDER_STAT_DEATH);
4168 } break;
4169 default:
4170 pr_err("unexpected work type, %d, not freed\n",
4171 w->type);
4172 break;
4173 }
4174 }
4175
4176 }
4177
4178 static struct binder_thread *binder_get_thread_ilocked(
4179 struct binder_proc *proc, struct binder_thread *new_thread)
4180 {
4181 struct binder_thread *thread = NULL;
4182 struct rb_node *parent = NULL;
4183 struct rb_node **p = &proc->threads.rb_node;
4184
4185 while (*p) {
4186 parent = *p;
4187 thread = rb_entry(parent, struct binder_thread, rb_node);
4188
4189 if (current->pid < thread->pid)
4190 p = &(*p)->rb_left;
4191 else if (current->pid > thread->pid)
4192 p = &(*p)->rb_right;
4193 else
4194 return thread;
4195 }
4196 if (!new_thread)
4197 return NULL;
4198 thread = new_thread;
4199 binder_stats_created(BINDER_STAT_THREAD);
4200 thread->proc = proc;
4201 thread->pid = current->pid;
4202 atomic_set(&thread->tmp_ref, 0);
4203 init_waitqueue_head(&thread->wait);
4204 INIT_LIST_HEAD(&thread->todo);
4205 rb_link_node(&thread->rb_node, parent, p);
4206 rb_insert_color(&thread->rb_node, &proc->threads);
4207 thread->looper_need_return = true;
4208 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4209 thread->return_error.cmd = BR_OK;
4210 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4211 thread->reply_error.cmd = BR_OK;
4212 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4213 return thread;
4214 }
4215
4216 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4217 {
4218 struct binder_thread *thread;
4219 struct binder_thread *new_thread;
4220
4221 binder_inner_proc_lock(proc);
4222 thread = binder_get_thread_ilocked(proc, NULL);
4223 binder_inner_proc_unlock(proc);
4224 if (!thread) {
4225 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4226 if (new_thread == NULL)
4227 return NULL;
4228 binder_inner_proc_lock(proc);
4229 thread = binder_get_thread_ilocked(proc, new_thread);
4230 binder_inner_proc_unlock(proc);
4231 if (thread != new_thread)
4232 kfree(new_thread);
4233 }
4234 return thread;
4235 }
4236
4237 static void binder_free_proc(struct binder_proc *proc)
4238 {
4239 BUG_ON(!list_empty(&proc->todo));
4240 BUG_ON(!list_empty(&proc->delivered_death));
4241 binder_alloc_deferred_release(&proc->alloc);
4242 put_task_struct(proc->tsk);
4243 binder_stats_deleted(BINDER_STAT_PROC);
4244 kfree(proc);
4245 }
4246
4247 static void binder_free_thread(struct binder_thread *thread)
4248 {
4249 BUG_ON(!list_empty(&thread->todo));
4250 binder_stats_deleted(BINDER_STAT_THREAD);
4251 binder_proc_dec_tmpref(thread->proc);
4252 kfree(thread);
4253 }
4254
4255 static int binder_thread_release(struct binder_proc *proc,
4256 struct binder_thread *thread)
4257 {
4258 struct binder_transaction *t;
4259 struct binder_transaction *send_reply = NULL;
4260 int active_transactions = 0;
4261 struct binder_transaction *last_t = NULL;
4262
4263 binder_inner_proc_lock(thread->proc);
4264 /*
4265 * take a ref on the proc so it survives
4266 * after we remove this thread from proc->threads.
4267 * The corresponding dec is when we actually
4268 * free the thread in binder_free_thread()
4269 */
4270 proc->tmp_ref++;
4271 /*
4272 * take a ref on this thread to ensure it
4273 * survives while we are releasing it
4274 */
4275 atomic_inc(&thread->tmp_ref);
4276 rb_erase(&thread->rb_node, &proc->threads);
4277 t = thread->transaction_stack;
4278 if (t) {
4279 spin_lock(&t->lock);
4280 if (t->to_thread == thread)
4281 send_reply = t;
4282 }
4283 thread->is_dead = true;
4284
4285 while (t) {
4286 last_t = t;
4287 active_transactions++;
4288 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4289 "release %d:%d transaction %d %s, still active\n",
4290 proc->pid, thread->pid,
4291 t->debug_id,
4292 (t->to_thread == thread) ? "in" : "out");
4293
4294 if (t->to_thread == thread) {
4295 t->to_proc = NULL;
4296 t->to_thread = NULL;
4297 if (t->buffer) {
4298 t->buffer->transaction = NULL;
4299 t->buffer = NULL;
4300 }
4301 t = t->to_parent;
4302 } else if (t->from == thread) {
4303 t->from = NULL;
4304 t = t->from_parent;
4305 } else
4306 BUG();
4307 spin_unlock(&last_t->lock);
4308 if (t)
4309 spin_lock(&t->lock);
4310 }
4311
4312 /*
4313 * If this thread used poll, make sure we remove the waitqueue
4314 * from any epoll data structures holding it with POLLFREE.
4315 * waitqueue_active() is safe to use here because we're holding
4316 * the inner lock.
4317 */
4318 if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4319 waitqueue_active(&thread->wait)) {
4320 wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
4321 }
4322
4323 binder_inner_proc_unlock(thread->proc);
4324
4325 if (send_reply)
4326 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4327 binder_release_work(proc, &thread->todo);
4328 binder_thread_dec_tmpref(thread);
4329 return active_transactions;
4330 }
4331
4332 static unsigned int binder_poll(struct file *filp,
4333 struct poll_table_struct *wait)
4334 {
4335 struct binder_proc *proc = filp->private_data;
4336 struct binder_thread *thread = NULL;
4337 bool wait_for_proc_work;
4338
4339 thread = binder_get_thread(proc);
4340
4341 binder_inner_proc_lock(thread->proc);
4342 thread->looper |= BINDER_LOOPER_STATE_POLL;
4343 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4344
4345 binder_inner_proc_unlock(thread->proc);
4346
4347 poll_wait(filp, &thread->wait, wait);
4348
4349 if (binder_has_work(thread, wait_for_proc_work))
4350 return POLLIN;
4351
4352 return 0;
4353 }
4354
4355 static int binder_ioctl_write_read(struct file *filp,
4356 unsigned int cmd, unsigned long arg,
4357 struct binder_thread *thread)
4358 {
4359 int ret = 0;
4360 struct binder_proc *proc = filp->private_data;
4361 unsigned int size = _IOC_SIZE(cmd);
4362 void __user *ubuf = (void __user *)arg;
4363 struct binder_write_read bwr;
4364
4365 if (size != sizeof(struct binder_write_read)) {
4366 ret = -EINVAL;
4367 goto out;
4368 }
4369 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4370 ret = -EFAULT;
4371 goto out;
4372 }
4373 binder_debug(BINDER_DEBUG_READ_WRITE,
4374 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4375 proc->pid, thread->pid,
4376 (u64)bwr.write_size, (u64)bwr.write_buffer,
4377 (u64)bwr.read_size, (u64)bwr.read_buffer);
4378
4379 if (bwr.write_size > 0) {
4380 ret = binder_thread_write(proc, thread,
4381 bwr.write_buffer,
4382 bwr.write_size,
4383 &bwr.write_consumed);
4384 trace_binder_write_done(ret);
4385 if (ret < 0) {
4386 bwr.read_consumed = 0;
4387 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4388 ret = -EFAULT;
4389 goto out;
4390 }
4391 }
4392 if (bwr.read_size > 0) {
4393 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4394 bwr.read_size,
4395 &bwr.read_consumed,
4396 filp->f_flags & O_NONBLOCK);
4397 trace_binder_read_done(ret);
4398 binder_inner_proc_lock(proc);
4399 if (!binder_worklist_empty_ilocked(&proc->todo))
4400 binder_wakeup_proc_ilocked(proc);
4401 binder_inner_proc_unlock(proc);
4402 if (ret < 0) {
4403 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4404 ret = -EFAULT;
4405 goto out;
4406 }
4407 }
4408 binder_debug(BINDER_DEBUG_READ_WRITE,
4409 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4410 proc->pid, thread->pid,
4411 (u64)bwr.write_consumed, (u64)bwr.write_size,
4412 (u64)bwr.read_consumed, (u64)bwr.read_size);
4413 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4414 ret = -EFAULT;
4415 goto out;
4416 }
4417 out:
4418 return ret;
4419 }
4420
4421 static int binder_ioctl_set_ctx_mgr(struct file *filp)
4422 {
4423 int ret = 0;
4424 struct binder_proc *proc = filp->private_data;
4425 struct binder_context *context = proc->context;
4426 struct binder_node *new_node;
4427 kuid_t curr_euid = current_euid();
4428
4429 mutex_lock(&context->context_mgr_node_lock);
4430 if (context->binder_context_mgr_node) {
4431 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4432 ret = -EBUSY;
4433 goto out;
4434 }
4435 ret = security_binder_set_context_mgr(proc->tsk);
4436 if (ret < 0)
4437 goto out;
4438 if (uid_valid(context->binder_context_mgr_uid)) {
4439 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4440 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4441 from_kuid(&init_user_ns, curr_euid),
4442 from_kuid(&init_user_ns,
4443 context->binder_context_mgr_uid));
4444 ret = -EPERM;
4445 goto out;
4446 }
4447 } else {
4448 context->binder_context_mgr_uid = curr_euid;
4449 }
4450 new_node = binder_new_node(proc, NULL);
4451 if (!new_node) {
4452 ret = -ENOMEM;
4453 goto out;
4454 }
4455 binder_node_lock(new_node);
4456 new_node->local_weak_refs++;
4457 new_node->local_strong_refs++;
4458 new_node->has_strong_ref = 1;
4459 new_node->has_weak_ref = 1;
4460 context->binder_context_mgr_node = new_node;
4461 binder_node_unlock(new_node);
4462 binder_put_node(new_node);
4463 out:
4464 mutex_unlock(&context->context_mgr_node_lock);
4465 return ret;
4466 }
4467
4468 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4469 struct binder_node_debug_info *info)
4470 {
4471 struct rb_node *n;
4472 binder_uintptr_t ptr = info->ptr;
4473
4474 memset(info, 0, sizeof(*info));
4475
4476 binder_inner_proc_lock(proc);
4477 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4478 struct binder_node *node = rb_entry(n, struct binder_node,
4479 rb_node);
4480 if (node->ptr > ptr) {
4481 info->ptr = node->ptr;
4482 info->cookie = node->cookie;
4483 info->has_strong_ref = node->has_strong_ref;
4484 info->has_weak_ref = node->has_weak_ref;
4485 break;
4486 }
4487 }
4488 binder_inner_proc_unlock(proc);
4489
4490 return 0;
4491 }
4492
4493 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4494 {
4495 int ret;
4496 struct binder_proc *proc = filp->private_data;
4497 struct binder_thread *thread;
4498 unsigned int size = _IOC_SIZE(cmd);
4499 void __user *ubuf = (void __user *)arg;
4500
4501 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4502 proc->pid, current->pid, cmd, arg);*/
4503
4504 binder_selftest_alloc(&proc->alloc);
4505
4506 trace_binder_ioctl(cmd, arg);
4507
4508 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4509 if (ret)
4510 goto err_unlocked;
4511
4512 thread = binder_get_thread(proc);
4513 if (thread == NULL) {
4514 ret = -ENOMEM;
4515 goto err;
4516 }
4517
4518 switch (cmd) {
4519 case BINDER_WRITE_READ:
4520 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4521 if (ret)
4522 goto err;
4523 break;
4524 case BINDER_SET_MAX_THREADS: {
4525 int max_threads;
4526
4527 if (copy_from_user(&max_threads, ubuf,
4528 sizeof(max_threads))) {
4529 ret = -EINVAL;
4530 goto err;
4531 }
4532 binder_inner_proc_lock(proc);
4533 proc->max_threads = max_threads;
4534 binder_inner_proc_unlock(proc);
4535 break;
4536 }
4537 case BINDER_SET_CONTEXT_MGR:
4538 ret = binder_ioctl_set_ctx_mgr(filp);
4539 if (ret)
4540 goto err;
4541 break;
4542 case BINDER_THREAD_EXIT:
4543 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
4544 proc->pid, thread->pid);
4545 binder_thread_release(proc, thread);
4546 thread = NULL;
4547 break;
4548 case BINDER_VERSION: {
4549 struct binder_version __user *ver = ubuf;
4550
4551 if (size != sizeof(struct binder_version)) {
4552 ret = -EINVAL;
4553 goto err;
4554 }
4555 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4556 &ver->protocol_version)) {
4557 ret = -EINVAL;
4558 goto err;
4559 }
4560 break;
4561 }
4562 case BINDER_GET_NODE_DEBUG_INFO: {
4563 struct binder_node_debug_info info;
4564
4565 if (copy_from_user(&info, ubuf, sizeof(info))) {
4566 ret = -EFAULT;
4567 goto err;
4568 }
4569
4570 ret = binder_ioctl_get_node_debug_info(proc, &info);
4571 if (ret < 0)
4572 goto err;
4573
4574 if (copy_to_user(ubuf, &info, sizeof(info))) {
4575 ret = -EFAULT;
4576 goto err;
4577 }
4578 break;
4579 }
4580 default:
4581 ret = -EINVAL;
4582 goto err;
4583 }
4584 ret = 0;
4585 err:
4586 if (thread)
4587 thread->looper_need_return = false;
4588 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4589 if (ret && ret != -ERESTARTSYS)
4590 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
4591 err_unlocked:
4592 trace_binder_ioctl_done(ret);
4593 return ret;
4594 }
4595
4596 static void binder_vma_open(struct vm_area_struct *vma)
4597 {
4598 struct binder_proc *proc = vma->vm_private_data;
4599
4600 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4601 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4602 proc->pid, vma->vm_start, vma->vm_end,
4603 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4604 (unsigned long)pgprot_val(vma->vm_page_prot));
4605 }
4606
4607 static void binder_vma_close(struct vm_area_struct *vma)
4608 {
4609 struct binder_proc *proc = vma->vm_private_data;
4610
4611 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4612 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4613 proc->pid, vma->vm_start, vma->vm_end,
4614 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4615 (unsigned long)pgprot_val(vma->vm_page_prot));
4616 binder_alloc_vma_close(&proc->alloc);
4617 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
4618 }
4619
4620 static int binder_vm_fault(struct vm_fault *vmf)
4621 {
4622 return VM_FAULT_SIGBUS;
4623 }
4624
4625 static const struct vm_operations_struct binder_vm_ops = {
4626 .open = binder_vma_open,
4627 .close = binder_vma_close,
4628 .fault = binder_vm_fault,
4629 };
4630
4631 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4632 {
4633 int ret;
4634 struct binder_proc *proc = filp->private_data;
4635 const char *failure_string;
4636
4637 if (proc->tsk != current->group_leader)
4638 return -EINVAL;
4639
4640 if ((vma->vm_end - vma->vm_start) > SZ_4M)
4641 vma->vm_end = vma->vm_start + SZ_4M;
4642
4643 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4644 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4645 __func__, proc->pid, vma->vm_start, vma->vm_end,
4646 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4647 (unsigned long)pgprot_val(vma->vm_page_prot));
4648
4649 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4650 ret = -EPERM;
4651 failure_string = "bad vm_flags";
4652 goto err_bad_arg;
4653 }
4654 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
4655 vma->vm_ops = &binder_vm_ops;
4656 vma->vm_private_data = proc;
4657
4658 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
4659 if (ret)
4660 return ret;
4661 mutex_lock(&proc->files_lock);
4662 proc->files = get_files_struct(current);
4663 mutex_unlock(&proc->files_lock);
4664 return 0;
4665
4666 err_bad_arg:
4667 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
4668 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
4669 return ret;
4670 }
4671
4672 static int binder_open(struct inode *nodp, struct file *filp)
4673 {
4674 struct binder_proc *proc;
4675 struct binder_device *binder_dev;
4676
4677 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
4678 current->group_leader->pid, current->pid);
4679
4680 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
4681 if (proc == NULL)
4682 return -ENOMEM;
4683 spin_lock_init(&proc->inner_lock);
4684 spin_lock_init(&proc->outer_lock);
4685 get_task_struct(current->group_leader);
4686 proc->tsk = current->group_leader;
4687 mutex_init(&proc->files_lock);
4688 INIT_LIST_HEAD(&proc->todo);
4689 proc->default_priority = task_nice(current);
4690 binder_dev = container_of(filp->private_data, struct binder_device,
4691 miscdev);
4692 proc->context = &binder_dev->context;
4693 binder_alloc_init(&proc->alloc);
4694
4695 binder_stats_created(BINDER_STAT_PROC);
4696 proc->pid = current->group_leader->pid;
4697 INIT_LIST_HEAD(&proc->delivered_death);
4698 INIT_LIST_HEAD(&proc->waiting_threads);
4699 filp->private_data = proc;
4700
4701 mutex_lock(&binder_procs_lock);
4702 hlist_add_head(&proc->proc_node, &binder_procs);
4703 mutex_unlock(&binder_procs_lock);
4704
4705 if (binder_debugfs_dir_entry_proc) {
4706 char strbuf[11];
4707
4708 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
4709 /*
4710 * proc debug entries are shared between contexts, so
4711 * this will fail if the process tries to open the driver
4712 * again with a different context. The priting code will
4713 * anyway print all contexts that a given PID has, so this
4714 * is not a problem.
4715 */
4716 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
4717 binder_debugfs_dir_entry_proc,
4718 (void *)(unsigned long)proc->pid,
4719 &binder_proc_fops);
4720 }
4721
4722 return 0;
4723 }
4724
4725 static int binder_flush(struct file *filp, fl_owner_t id)
4726 {
4727 struct binder_proc *proc = filp->private_data;
4728
4729 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
4730
4731 return 0;
4732 }
4733
4734 static void binder_deferred_flush(struct binder_proc *proc)
4735 {
4736 struct rb_node *n;
4737 int wake_count = 0;
4738
4739 binder_inner_proc_lock(proc);
4740 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
4741 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
4742
4743 thread->looper_need_return = true;
4744 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
4745 wake_up_interruptible(&thread->wait);
4746 wake_count++;
4747 }
4748 }
4749 binder_inner_proc_unlock(proc);
4750
4751 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4752 "binder_flush: %d woke %d threads\n", proc->pid,
4753 wake_count);
4754 }
4755
4756 static int binder_release(struct inode *nodp, struct file *filp)
4757 {
4758 struct binder_proc *proc = filp->private_data;
4759
4760 debugfs_remove(proc->debugfs_entry);
4761 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
4762
4763 return 0;
4764 }
4765
4766 static int binder_node_release(struct binder_node *node, int refs)
4767 {
4768 struct binder_ref *ref;
4769 int death = 0;
4770 struct binder_proc *proc = node->proc;
4771
4772 binder_release_work(proc, &node->async_todo);
4773
4774 binder_node_lock(node);
4775 binder_inner_proc_lock(proc);
4776 binder_dequeue_work_ilocked(&node->work);
4777 /*
4778 * The caller must have taken a temporary ref on the node,
4779 */
4780 BUG_ON(!node->tmp_refs);
4781 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
4782 binder_inner_proc_unlock(proc);
4783 binder_node_unlock(node);
4784 binder_free_node(node);
4785
4786 return refs;
4787 }
4788
4789 node->proc = NULL;
4790 node->local_strong_refs = 0;
4791 node->local_weak_refs = 0;
4792 binder_inner_proc_unlock(proc);
4793
4794 spin_lock(&binder_dead_nodes_lock);
4795 hlist_add_head(&node->dead_node, &binder_dead_nodes);
4796 spin_unlock(&binder_dead_nodes_lock);
4797
4798 hlist_for_each_entry(ref, &node->refs, node_entry) {
4799 refs++;
4800 /*
4801 * Need the node lock to synchronize
4802 * with new notification requests and the
4803 * inner lock to synchronize with queued
4804 * death notifications.
4805 */
4806 binder_inner_proc_lock(ref->proc);
4807 if (!ref->death) {
4808 binder_inner_proc_unlock(ref->proc);
4809 continue;
4810 }
4811
4812 death++;
4813
4814 BUG_ON(!list_empty(&ref->death->work.entry));
4815 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
4816 binder_enqueue_work_ilocked(&ref->death->work,
4817 &ref->proc->todo);
4818 binder_wakeup_proc_ilocked(ref->proc);
4819 binder_inner_proc_unlock(ref->proc);
4820 }
4821
4822 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4823 "node %d now dead, refs %d, death %d\n",
4824 node->debug_id, refs, death);
4825 binder_node_unlock(node);
4826 binder_put_node(node);
4827
4828 return refs;
4829 }
4830
4831 static void binder_deferred_release(struct binder_proc *proc)
4832 {
4833 struct binder_context *context = proc->context;
4834 struct rb_node *n;
4835 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
4836
4837 BUG_ON(proc->files);
4838
4839 mutex_lock(&binder_procs_lock);
4840 hlist_del(&proc->proc_node);
4841 mutex_unlock(&binder_procs_lock);
4842
4843 mutex_lock(&context->context_mgr_node_lock);
4844 if (context->binder_context_mgr_node &&
4845 context->binder_context_mgr_node->proc == proc) {
4846 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4847 "%s: %d context_mgr_node gone\n",
4848 __func__, proc->pid);
4849 context->binder_context_mgr_node = NULL;
4850 }
4851 mutex_unlock(&context->context_mgr_node_lock);
4852 binder_inner_proc_lock(proc);
4853 /*
4854 * Make sure proc stays alive after we
4855 * remove all the threads
4856 */
4857 proc->tmp_ref++;
4858
4859 proc->is_dead = true;
4860 threads = 0;
4861 active_transactions = 0;
4862 while ((n = rb_first(&proc->threads))) {
4863 struct binder_thread *thread;
4864
4865 thread = rb_entry(n, struct binder_thread, rb_node);
4866 binder_inner_proc_unlock(proc);
4867 threads++;
4868 active_transactions += binder_thread_release(proc, thread);
4869 binder_inner_proc_lock(proc);
4870 }
4871
4872 nodes = 0;
4873 incoming_refs = 0;
4874 while ((n = rb_first(&proc->nodes))) {
4875 struct binder_node *node;
4876
4877 node = rb_entry(n, struct binder_node, rb_node);
4878 nodes++;
4879 /*
4880 * take a temporary ref on the node before
4881 * calling binder_node_release() which will either
4882 * kfree() the node or call binder_put_node()
4883 */
4884 binder_inc_node_tmpref_ilocked(node);
4885 rb_erase(&node->rb_node, &proc->nodes);
4886 binder_inner_proc_unlock(proc);
4887 incoming_refs = binder_node_release(node, incoming_refs);
4888 binder_inner_proc_lock(proc);
4889 }
4890 binder_inner_proc_unlock(proc);
4891
4892 outgoing_refs = 0;
4893 binder_proc_lock(proc);
4894 while ((n = rb_first(&proc->refs_by_desc))) {
4895 struct binder_ref *ref;
4896
4897 ref = rb_entry(n, struct binder_ref, rb_node_desc);
4898 outgoing_refs++;
4899 binder_cleanup_ref_olocked(ref);
4900 binder_proc_unlock(proc);
4901 binder_free_ref(ref);
4902 binder_proc_lock(proc);
4903 }
4904 binder_proc_unlock(proc);
4905
4906 binder_release_work(proc, &proc->todo);
4907 binder_release_work(proc, &proc->delivered_death);
4908
4909 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4910 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
4911 __func__, proc->pid, threads, nodes, incoming_refs,
4912 outgoing_refs, active_transactions);
4913
4914 binder_proc_dec_tmpref(proc);
4915 }
4916
4917 static void binder_deferred_func(struct work_struct *work)
4918 {
4919 struct binder_proc *proc;
4920 struct files_struct *files;
4921
4922 int defer;
4923
4924 do {
4925 mutex_lock(&binder_deferred_lock);
4926 if (!hlist_empty(&binder_deferred_list)) {
4927 proc = hlist_entry(binder_deferred_list.first,
4928 struct binder_proc, deferred_work_node);
4929 hlist_del_init(&proc->deferred_work_node);
4930 defer = proc->deferred_work;
4931 proc->deferred_work = 0;
4932 } else {
4933 proc = NULL;
4934 defer = 0;
4935 }
4936 mutex_unlock(&binder_deferred_lock);
4937
4938 files = NULL;
4939 if (defer & BINDER_DEFERRED_PUT_FILES) {
4940 mutex_lock(&proc->files_lock);
4941 files = proc->files;
4942 if (files)
4943 proc->files = NULL;
4944 mutex_unlock(&proc->files_lock);
4945 }
4946
4947 if (defer & BINDER_DEFERRED_FLUSH)
4948 binder_deferred_flush(proc);
4949
4950 if (defer & BINDER_DEFERRED_RELEASE)
4951 binder_deferred_release(proc); /* frees proc */
4952
4953 if (files)
4954 put_files_struct(files);
4955 } while (proc);
4956 }
4957 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
4958
4959 static void
4960 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
4961 {
4962 mutex_lock(&binder_deferred_lock);
4963 proc->deferred_work |= defer;
4964 if (hlist_unhashed(&proc->deferred_work_node)) {
4965 hlist_add_head(&proc->deferred_work_node,
4966 &binder_deferred_list);
4967 schedule_work(&binder_deferred_work);
4968 }
4969 mutex_unlock(&binder_deferred_lock);
4970 }
4971
4972 static void print_binder_transaction_ilocked(struct seq_file *m,
4973 struct binder_proc *proc,
4974 const char *prefix,
4975 struct binder_transaction *t)
4976 {
4977 struct binder_proc *to_proc;
4978 struct binder_buffer *buffer = t->buffer;
4979
4980 spin_lock(&t->lock);
4981 to_proc = t->to_proc;
4982 seq_printf(m,
4983 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
4984 prefix, t->debug_id, t,
4985 t->from ? t->from->proc->pid : 0,
4986 t->from ? t->from->pid : 0,
4987 to_proc ? to_proc->pid : 0,
4988 t->to_thread ? t->to_thread->pid : 0,
4989 t->code, t->flags, t->priority, t->need_reply);
4990 spin_unlock(&t->lock);
4991
4992 if (proc != to_proc) {
4993 /*
4994 * Can only safely deref buffer if we are holding the
4995 * correct proc inner lock for this node
4996 */
4997 seq_puts(m, "\n");
4998 return;
4999 }
5000
5001 if (buffer == NULL) {
5002 seq_puts(m, " buffer free\n");
5003 return;
5004 }
5005 if (buffer->target_node)
5006 seq_printf(m, " node %d", buffer->target_node->debug_id);
5007 seq_printf(m, " size %zd:%zd data %p\n",
5008 buffer->data_size, buffer->offsets_size,
5009 buffer->data);
5010 }
5011
5012 static void print_binder_work_ilocked(struct seq_file *m,
5013 struct binder_proc *proc,
5014 const char *prefix,
5015 const char *transaction_prefix,
5016 struct binder_work *w)
5017 {
5018 struct binder_node *node;
5019 struct binder_transaction *t;
5020
5021 switch (w->type) {
5022 case BINDER_WORK_TRANSACTION:
5023 t = container_of(w, struct binder_transaction, work);
5024 print_binder_transaction_ilocked(
5025 m, proc, transaction_prefix, t);
5026 break;
5027 case BINDER_WORK_RETURN_ERROR: {
5028 struct binder_error *e = container_of(
5029 w, struct binder_error, work);
5030
5031 seq_printf(m, "%stransaction error: %u\n",
5032 prefix, e->cmd);
5033 } break;
5034 case BINDER_WORK_TRANSACTION_COMPLETE:
5035 seq_printf(m, "%stransaction complete\n", prefix);
5036 break;
5037 case BINDER_WORK_NODE:
5038 node = container_of(w, struct binder_node, work);
5039 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5040 prefix, node->debug_id,
5041 (u64)node->ptr, (u64)node->cookie);
5042 break;
5043 case BINDER_WORK_DEAD_BINDER:
5044 seq_printf(m, "%shas dead binder\n", prefix);
5045 break;
5046 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5047 seq_printf(m, "%shas cleared dead binder\n", prefix);
5048 break;
5049 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5050 seq_printf(m, "%shas cleared death notification\n", prefix);
5051 break;
5052 default:
5053 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5054 break;
5055 }
5056 }
5057
5058 static void print_binder_thread_ilocked(struct seq_file *m,
5059 struct binder_thread *thread,
5060 int print_always)
5061 {
5062 struct binder_transaction *t;
5063 struct binder_work *w;
5064 size_t start_pos = m->count;
5065 size_t header_pos;
5066
5067 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
5068 thread->pid, thread->looper,
5069 thread->looper_need_return,
5070 atomic_read(&thread->tmp_ref));
5071 header_pos = m->count;
5072 t = thread->transaction_stack;
5073 while (t) {
5074 if (t->from == thread) {
5075 print_binder_transaction_ilocked(m, thread->proc,
5076 " outgoing transaction", t);
5077 t = t->from_parent;
5078 } else if (t->to_thread == thread) {
5079 print_binder_transaction_ilocked(m, thread->proc,
5080 " incoming transaction", t);
5081 t = t->to_parent;
5082 } else {
5083 print_binder_transaction_ilocked(m, thread->proc,
5084 " bad transaction", t);
5085 t = NULL;
5086 }
5087 }
5088 list_for_each_entry(w, &thread->todo, entry) {
5089 print_binder_work_ilocked(m, thread->proc, " ",
5090 " pending transaction", w);
5091 }
5092 if (!print_always && m->count == header_pos)
5093 m->count = start_pos;
5094 }
5095
5096 static void print_binder_node_nilocked(struct seq_file *m,
5097 struct binder_node *node)
5098 {
5099 struct binder_ref *ref;
5100 struct binder_work *w;
5101 int count;
5102
5103 count = 0;
5104 hlist_for_each_entry(ref, &node->refs, node_entry)
5105 count++;
5106
5107 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5108 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5109 node->has_strong_ref, node->has_weak_ref,
5110 node->local_strong_refs, node->local_weak_refs,
5111 node->internal_strong_refs, count, node->tmp_refs);
5112 if (count) {
5113 seq_puts(m, " proc");
5114 hlist_for_each_entry(ref, &node->refs, node_entry)
5115 seq_printf(m, " %d", ref->proc->pid);
5116 }
5117 seq_puts(m, "\n");
5118 if (node->proc) {
5119 list_for_each_entry(w, &node->async_todo, entry)
5120 print_binder_work_ilocked(m, node->proc, " ",
5121 " pending async transaction", w);
5122 }
5123 }
5124
5125 static void print_binder_ref_olocked(struct seq_file *m,
5126 struct binder_ref *ref)
5127 {
5128 binder_node_lock(ref->node);
5129 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5130 ref->data.debug_id, ref->data.desc,
5131 ref->node->proc ? "" : "dead ",
5132 ref->node->debug_id, ref->data.strong,
5133 ref->data.weak, ref->death);
5134 binder_node_unlock(ref->node);
5135 }
5136
5137 static void print_binder_proc(struct seq_file *m,
5138 struct binder_proc *proc, int print_all)
5139 {
5140 struct binder_work *w;
5141 struct rb_node *n;
5142 size_t start_pos = m->count;
5143 size_t header_pos;
5144 struct binder_node *last_node = NULL;
5145
5146 seq_printf(m, "proc %d\n", proc->pid);
5147 seq_printf(m, "context %s\n", proc->context->name);
5148 header_pos = m->count;
5149
5150 binder_inner_proc_lock(proc);
5151 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5152 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5153 rb_node), print_all);
5154
5155 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5156 struct binder_node *node = rb_entry(n, struct binder_node,
5157 rb_node);
5158 /*
5159 * take a temporary reference on the node so it
5160 * survives and isn't removed from the tree
5161 * while we print it.
5162 */
5163 binder_inc_node_tmpref_ilocked(node);
5164 /* Need to drop inner lock to take node lock */
5165 binder_inner_proc_unlock(proc);
5166 if (last_node)
5167 binder_put_node(last_node);
5168 binder_node_inner_lock(node);
5169 print_binder_node_nilocked(m, node);
5170 binder_node_inner_unlock(node);
5171 last_node = node;
5172 binder_inner_proc_lock(proc);
5173 }
5174 binder_inner_proc_unlock(proc);
5175 if (last_node)
5176 binder_put_node(last_node);
5177
5178 if (print_all) {
5179 binder_proc_lock(proc);
5180 for (n = rb_first(&proc->refs_by_desc);
5181 n != NULL;
5182 n = rb_next(n))
5183 print_binder_ref_olocked(m, rb_entry(n,
5184 struct binder_ref,
5185 rb_node_desc));
5186 binder_proc_unlock(proc);
5187 }
5188 binder_alloc_print_allocated(m, &proc->alloc);
5189 binder_inner_proc_lock(proc);
5190 list_for_each_entry(w, &proc->todo, entry)
5191 print_binder_work_ilocked(m, proc, " ",
5192 " pending transaction", w);
5193 list_for_each_entry(w, &proc->delivered_death, entry) {
5194 seq_puts(m, " has delivered dead binder\n");
5195 break;
5196 }
5197 binder_inner_proc_unlock(proc);
5198 if (!print_all && m->count == header_pos)
5199 m->count = start_pos;
5200 }
5201
5202 static const char * const binder_return_strings[] = {
5203 "BR_ERROR",
5204 "BR_OK",
5205 "BR_TRANSACTION",
5206 "BR_REPLY",
5207 "BR_ACQUIRE_RESULT",
5208 "BR_DEAD_REPLY",
5209 "BR_TRANSACTION_COMPLETE",
5210 "BR_INCREFS",
5211 "BR_ACQUIRE",
5212 "BR_RELEASE",
5213 "BR_DECREFS",
5214 "BR_ATTEMPT_ACQUIRE",
5215 "BR_NOOP",
5216 "BR_SPAWN_LOOPER",
5217 "BR_FINISHED",
5218 "BR_DEAD_BINDER",
5219 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5220 "BR_FAILED_REPLY"
5221 };
5222
5223 static const char * const binder_command_strings[] = {
5224 "BC_TRANSACTION",
5225 "BC_REPLY",
5226 "BC_ACQUIRE_RESULT",
5227 "BC_FREE_BUFFER",
5228 "BC_INCREFS",
5229 "BC_ACQUIRE",
5230 "BC_RELEASE",
5231 "BC_DECREFS",
5232 "BC_INCREFS_DONE",
5233 "BC_ACQUIRE_DONE",
5234 "BC_ATTEMPT_ACQUIRE",
5235 "BC_REGISTER_LOOPER",
5236 "BC_ENTER_LOOPER",
5237 "BC_EXIT_LOOPER",
5238 "BC_REQUEST_DEATH_NOTIFICATION",
5239 "BC_CLEAR_DEATH_NOTIFICATION",
5240 "BC_DEAD_BINDER_DONE",
5241 "BC_TRANSACTION_SG",
5242 "BC_REPLY_SG",
5243 };
5244
5245 static const char * const binder_objstat_strings[] = {
5246 "proc",
5247 "thread",
5248 "node",
5249 "ref",
5250 "death",
5251 "transaction",
5252 "transaction_complete"
5253 };
5254
5255 static void print_binder_stats(struct seq_file *m, const char *prefix,
5256 struct binder_stats *stats)
5257 {
5258 int i;
5259
5260 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5261 ARRAY_SIZE(binder_command_strings));
5262 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5263 int temp = atomic_read(&stats->bc[i]);
5264
5265 if (temp)
5266 seq_printf(m, "%s%s: %d\n", prefix,
5267 binder_command_strings[i], temp);
5268 }
5269
5270 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5271 ARRAY_SIZE(binder_return_strings));
5272 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5273 int temp = atomic_read(&stats->br[i]);
5274
5275 if (temp)
5276 seq_printf(m, "%s%s: %d\n", prefix,
5277 binder_return_strings[i], temp);
5278 }
5279
5280 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5281 ARRAY_SIZE(binder_objstat_strings));
5282 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5283 ARRAY_SIZE(stats->obj_deleted));
5284 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5285 int created = atomic_read(&stats->obj_created[i]);
5286 int deleted = atomic_read(&stats->obj_deleted[i]);
5287
5288 if (created || deleted)
5289 seq_printf(m, "%s%s: active %d total %d\n",
5290 prefix,
5291 binder_objstat_strings[i],
5292 created - deleted,
5293 created);
5294 }
5295 }
5296
5297 static void print_binder_proc_stats(struct seq_file *m,
5298 struct binder_proc *proc)
5299 {
5300 struct binder_work *w;
5301 struct binder_thread *thread;
5302 struct rb_node *n;
5303 int count, strong, weak, ready_threads;
5304 size_t free_async_space =
5305 binder_alloc_get_free_async_space(&proc->alloc);
5306
5307 seq_printf(m, "proc %d\n", proc->pid);
5308 seq_printf(m, "context %s\n", proc->context->name);
5309 count = 0;
5310 ready_threads = 0;
5311 binder_inner_proc_lock(proc);
5312 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5313 count++;
5314
5315 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5316 ready_threads++;
5317
5318 seq_printf(m, " threads: %d\n", count);
5319 seq_printf(m, " requested threads: %d+%d/%d\n"
5320 " ready threads %d\n"
5321 " free async space %zd\n", proc->requested_threads,
5322 proc->requested_threads_started, proc->max_threads,
5323 ready_threads,
5324 free_async_space);
5325 count = 0;
5326 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5327 count++;
5328 binder_inner_proc_unlock(proc);
5329 seq_printf(m, " nodes: %d\n", count);
5330 count = 0;
5331 strong = 0;
5332 weak = 0;
5333 binder_proc_lock(proc);
5334 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5335 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5336 rb_node_desc);
5337 count++;
5338 strong += ref->data.strong;
5339 weak += ref->data.weak;
5340 }
5341 binder_proc_unlock(proc);
5342 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
5343
5344 count = binder_alloc_get_allocated_count(&proc->alloc);
5345 seq_printf(m, " buffers: %d\n", count);
5346
5347 binder_alloc_print_pages(m, &proc->alloc);
5348
5349 count = 0;
5350 binder_inner_proc_lock(proc);
5351 list_for_each_entry(w, &proc->todo, entry) {
5352 if (w->type == BINDER_WORK_TRANSACTION)
5353 count++;
5354 }
5355 binder_inner_proc_unlock(proc);
5356 seq_printf(m, " pending transactions: %d\n", count);
5357
5358 print_binder_stats(m, " ", &proc->stats);
5359 }
5360
5361
5362 static int binder_state_show(struct seq_file *m, void *unused)
5363 {
5364 struct binder_proc *proc;
5365 struct binder_node *node;
5366 struct binder_node *last_node = NULL;
5367
5368 seq_puts(m, "binder state:\n");
5369
5370 spin_lock(&binder_dead_nodes_lock);
5371 if (!hlist_empty(&binder_dead_nodes))
5372 seq_puts(m, "dead nodes:\n");
5373 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5374 /*
5375 * take a temporary reference on the node so it
5376 * survives and isn't removed from the list
5377 * while we print it.
5378 */
5379 node->tmp_refs++;
5380 spin_unlock(&binder_dead_nodes_lock);
5381 if (last_node)
5382 binder_put_node(last_node);
5383 binder_node_lock(node);
5384 print_binder_node_nilocked(m, node);
5385 binder_node_unlock(node);
5386 last_node = node;
5387 spin_lock(&binder_dead_nodes_lock);
5388 }
5389 spin_unlock(&binder_dead_nodes_lock);
5390 if (last_node)
5391 binder_put_node(last_node);
5392
5393 mutex_lock(&binder_procs_lock);
5394 hlist_for_each_entry(proc, &binder_procs, proc_node)
5395 print_binder_proc(m, proc, 1);
5396 mutex_unlock(&binder_procs_lock);
5397
5398 return 0;
5399 }
5400
5401 static int binder_stats_show(struct seq_file *m, void *unused)
5402 {
5403 struct binder_proc *proc;
5404
5405 seq_puts(m, "binder stats:\n");
5406
5407 print_binder_stats(m, "", &binder_stats);
5408
5409 mutex_lock(&binder_procs_lock);
5410 hlist_for_each_entry(proc, &binder_procs, proc_node)
5411 print_binder_proc_stats(m, proc);
5412 mutex_unlock(&binder_procs_lock);
5413
5414 return 0;
5415 }
5416
5417 static int binder_transactions_show(struct seq_file *m, void *unused)
5418 {
5419 struct binder_proc *proc;
5420
5421 seq_puts(m, "binder transactions:\n");
5422 mutex_lock(&binder_procs_lock);
5423 hlist_for_each_entry(proc, &binder_procs, proc_node)
5424 print_binder_proc(m, proc, 0);
5425 mutex_unlock(&binder_procs_lock);
5426
5427 return 0;
5428 }
5429
5430 static int binder_proc_show(struct seq_file *m, void *unused)
5431 {
5432 struct binder_proc *itr;
5433 int pid = (unsigned long)m->private;
5434
5435 mutex_lock(&binder_procs_lock);
5436 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5437 if (itr->pid == pid) {
5438 seq_puts(m, "binder proc state:\n");
5439 print_binder_proc(m, itr, 1);
5440 }
5441 }
5442 mutex_unlock(&binder_procs_lock);
5443
5444 return 0;
5445 }
5446
5447 static void print_binder_transaction_log_entry(struct seq_file *m,
5448 struct binder_transaction_log_entry *e)
5449 {
5450 int debug_id = READ_ONCE(e->debug_id_done);
5451 /*
5452 * read barrier to guarantee debug_id_done read before
5453 * we print the log values
5454 */
5455 smp_rmb();
5456 seq_printf(m,
5457 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5458 e->debug_id, (e->call_type == 2) ? "reply" :
5459 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5460 e->from_thread, e->to_proc, e->to_thread, e->context_name,
5461 e->to_node, e->target_handle, e->data_size, e->offsets_size,
5462 e->return_error, e->return_error_param,
5463 e->return_error_line);
5464 /*
5465 * read-barrier to guarantee read of debug_id_done after
5466 * done printing the fields of the entry
5467 */
5468 smp_rmb();
5469 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5470 "\n" : " (incomplete)\n");
5471 }
5472
5473 static int binder_transaction_log_show(struct seq_file *m, void *unused)
5474 {
5475 struct binder_transaction_log *log = m->private;
5476 unsigned int log_cur = atomic_read(&log->cur);
5477 unsigned int count;
5478 unsigned int cur;
5479 int i;
5480
5481 count = log_cur + 1;
5482 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5483 0 : count % ARRAY_SIZE(log->entry);
5484 if (count > ARRAY_SIZE(log->entry) || log->full)
5485 count = ARRAY_SIZE(log->entry);
5486 for (i = 0; i < count; i++) {
5487 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5488
5489 print_binder_transaction_log_entry(m, &log->entry[index]);
5490 }
5491 return 0;
5492 }
5493
5494 static const struct file_operations binder_fops = {
5495 .owner = THIS_MODULE,
5496 .poll = binder_poll,
5497 .unlocked_ioctl = binder_ioctl,
5498 .compat_ioctl = binder_ioctl,
5499 .mmap = binder_mmap,
5500 .open = binder_open,
5501 .flush = binder_flush,
5502 .release = binder_release,
5503 };
5504
5505 BINDER_DEBUG_ENTRY(state);
5506 BINDER_DEBUG_ENTRY(stats);
5507 BINDER_DEBUG_ENTRY(transactions);
5508 BINDER_DEBUG_ENTRY(transaction_log);
5509
5510 static int __init init_binder_device(const char *name)
5511 {
5512 int ret;
5513 struct binder_device *binder_device;
5514
5515 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5516 if (!binder_device)
5517 return -ENOMEM;
5518
5519 binder_device->miscdev.fops = &binder_fops;
5520 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5521 binder_device->miscdev.name = name;
5522
5523 binder_device->context.binder_context_mgr_uid = INVALID_UID;
5524 binder_device->context.name = name;
5525 mutex_init(&binder_device->context.context_mgr_node_lock);
5526
5527 ret = misc_register(&binder_device->miscdev);
5528 if (ret < 0) {
5529 kfree(binder_device);
5530 return ret;
5531 }
5532
5533 hlist_add_head(&binder_device->hlist, &binder_devices);
5534
5535 return ret;
5536 }
5537
5538 static int __init binder_init(void)
5539 {
5540 int ret;
5541 char *device_name, *device_names, *device_tmp;
5542 struct binder_device *device;
5543 struct hlist_node *tmp;
5544
5545 binder_alloc_shrinker_init();
5546
5547 atomic_set(&binder_transaction_log.cur, ~0U);
5548 atomic_set(&binder_transaction_log_failed.cur, ~0U);
5549
5550 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5551 if (binder_debugfs_dir_entry_root)
5552 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5553 binder_debugfs_dir_entry_root);
5554
5555 if (binder_debugfs_dir_entry_root) {
5556 debugfs_create_file("state",
5557 S_IRUGO,
5558 binder_debugfs_dir_entry_root,
5559 NULL,
5560 &binder_state_fops);
5561 debugfs_create_file("stats",
5562 S_IRUGO,
5563 binder_debugfs_dir_entry_root,
5564 NULL,
5565 &binder_stats_fops);
5566 debugfs_create_file("transactions",
5567 S_IRUGO,
5568 binder_debugfs_dir_entry_root,
5569 NULL,
5570 &binder_transactions_fops);
5571 debugfs_create_file("transaction_log",
5572 S_IRUGO,
5573 binder_debugfs_dir_entry_root,
5574 &binder_transaction_log,
5575 &binder_transaction_log_fops);
5576 debugfs_create_file("failed_transaction_log",
5577 S_IRUGO,
5578 binder_debugfs_dir_entry_root,
5579 &binder_transaction_log_failed,
5580 &binder_transaction_log_fops);
5581 }
5582
5583 /*
5584 * Copy the module_parameter string, because we don't want to
5585 * tokenize it in-place.
5586 */
5587 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
5588 if (!device_names) {
5589 ret = -ENOMEM;
5590 goto err_alloc_device_names_failed;
5591 }
5592 strcpy(device_names, binder_devices_param);
5593
5594 device_tmp = device_names;
5595 while ((device_name = strsep(&device_tmp, ","))) {
5596 ret = init_binder_device(device_name);
5597 if (ret)
5598 goto err_init_binder_device_failed;
5599 }
5600
5601 return ret;
5602
5603 err_init_binder_device_failed:
5604 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
5605 misc_deregister(&device->miscdev);
5606 hlist_del(&device->hlist);
5607 kfree(device);
5608 }
5609
5610 kfree(device_names);
5611
5612 err_alloc_device_names_failed:
5613 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
5614
5615 return ret;
5616 }
5617
5618 device_initcall(binder_init);
5619
5620 #define CREATE_TRACE_POINTS
5621 #include "binder_trace.h"
5622
5623 MODULE_LICENSE("GPL v2");