[common] binder: fix conflict (4.14.47 -> 4.14.62)
[GitHub/moto-9609/android_kernel_motorola_exynos9610.git] / drivers / android / binder.c
1 /* binder.c
2 *
3 * Android IPC Subsystem
4 *
5 * Copyright (C) 2007-2008 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18 /*
19 * Locking overview
20 *
21 * There are 3 main spinlocks which must be acquired in the
22 * order shown:
23 *
24 * 1) proc->outer_lock : protects binder_ref
25 * binder_proc_lock() and binder_proc_unlock() are
26 * used to acq/rel.
27 * 2) node->lock : protects most fields of binder_node.
28 * binder_node_lock() and binder_node_unlock() are
29 * used to acq/rel
30 * 3) proc->inner_lock : protects the thread and node lists
31 * (proc->threads, proc->waiting_threads, proc->nodes)
32 * and all todo lists associated with the binder_proc
33 * (proc->todo, thread->todo, proc->delivered_death and
34 * node->async_todo), as well as thread->transaction_stack
35 * binder_inner_proc_lock() and binder_inner_proc_unlock()
36 * are used to acq/rel
37 *
38 * Any lock under procA must never be nested under any lock at the same
39 * level or below on procB.
40 *
41 * Functions that require a lock held on entry indicate which lock
42 * in the suffix of the function name:
43 *
44 * foo_olocked() : requires node->outer_lock
45 * foo_nlocked() : requires node->lock
46 * foo_ilocked() : requires proc->inner_lock
47 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
48 * foo_nilocked(): requires node->lock and proc->inner_lock
49 * ...
50 */
51
52 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
53
54 #include <asm/cacheflush.h>
55 #include <linux/fdtable.h>
56 #include <linux/file.h>
57 #include <linux/freezer.h>
58 #include <linux/fs.h>
59 #include <linux/list.h>
60 #include <linux/miscdevice.h>
61 #include <linux/module.h>
62 #include <linux/mutex.h>
63 #include <linux/nsproxy.h>
64 #include <linux/poll.h>
65 #include <linux/debugfs.h>
66 #include <linux/rbtree.h>
67 #include <linux/sched/signal.h>
68 #include <linux/sched/mm.h>
69 #include <linux/seq_file.h>
70 #include <linux/uaccess.h>
71 #include <linux/pid_namespace.h>
72 #include <linux/security.h>
73 #include <linux/spinlock.h>
74
75 #ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
76 #define BINDER_IPC_32BIT 1
77 #endif
78
79 #include <uapi/linux/android/binder.h>
80 #include <uapi/linux/sched/types.h>
81 #include "binder_alloc.h"
82 #include "binder_trace.h"
83
84 static HLIST_HEAD(binder_deferred_list);
85 static DEFINE_MUTEX(binder_deferred_lock);
86
87 static HLIST_HEAD(binder_devices);
88 static HLIST_HEAD(binder_procs);
89 static DEFINE_MUTEX(binder_procs_lock);
90
91 static HLIST_HEAD(binder_dead_nodes);
92 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
93
94 static struct dentry *binder_debugfs_dir_entry_root;
95 static struct dentry *binder_debugfs_dir_entry_proc;
96 static atomic_t binder_last_id;
97
98 #define BINDER_DEBUG_ENTRY(name) \
99 static int binder_##name##_open(struct inode *inode, struct file *file) \
100 { \
101 return single_open(file, binder_##name##_show, inode->i_private); \
102 } \
103 \
104 static const struct file_operations binder_##name##_fops = { \
105 .owner = THIS_MODULE, \
106 .open = binder_##name##_open, \
107 .read = seq_read, \
108 .llseek = seq_lseek, \
109 .release = single_release, \
110 }
111
112 static int binder_proc_show(struct seq_file *m, void *unused);
113 BINDER_DEBUG_ENTRY(proc);
114
115 /* This is only defined in include/asm-arm/sizes.h */
116 #ifndef SZ_1K
117 #define SZ_1K 0x400
118 #endif
119
120 #ifndef SZ_4M
121 #define SZ_4M 0x400000
122 #endif
123
124 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
125
126 enum {
127 BINDER_DEBUG_USER_ERROR = 1U << 0,
128 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
129 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
130 BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
131 BINDER_DEBUG_DEAD_BINDER = 1U << 4,
132 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
133 BINDER_DEBUG_READ_WRITE = 1U << 6,
134 BINDER_DEBUG_USER_REFS = 1U << 7,
135 BINDER_DEBUG_THREADS = 1U << 8,
136 BINDER_DEBUG_TRANSACTION = 1U << 9,
137 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
138 BINDER_DEBUG_FREE_BUFFER = 1U << 11,
139 BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
140 BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
141 BINDER_DEBUG_SPINLOCKS = 1U << 14,
142 };
143 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
144 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
145 module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
146
147 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
148 module_param_named(devices, binder_devices_param, charp, 0444);
149
150 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
151 static int binder_stop_on_user_error;
152
153 static int binder_set_stop_on_user_error(const char *val,
154 const struct kernel_param *kp)
155 {
156 int ret;
157
158 ret = param_set_int(val, kp);
159 if (binder_stop_on_user_error < 2)
160 wake_up(&binder_user_error_wait);
161 return ret;
162 }
163 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
164 param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
165
166 #define binder_debug(mask, x...) \
167 do { \
168 if (binder_debug_mask & mask) \
169 pr_info(x); \
170 } while (0)
171
172 #define binder_user_error(x...) \
173 do { \
174 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
175 pr_info(x); \
176 if (binder_stop_on_user_error) \
177 binder_stop_on_user_error = 2; \
178 } while (0)
179
180 #define to_flat_binder_object(hdr) \
181 container_of(hdr, struct flat_binder_object, hdr)
182
183 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
184
185 #define to_binder_buffer_object(hdr) \
186 container_of(hdr, struct binder_buffer_object, hdr)
187
188 #define to_binder_fd_array_object(hdr) \
189 container_of(hdr, struct binder_fd_array_object, hdr)
190
191 enum binder_stat_types {
192 BINDER_STAT_PROC,
193 BINDER_STAT_THREAD,
194 BINDER_STAT_NODE,
195 BINDER_STAT_REF,
196 BINDER_STAT_DEATH,
197 BINDER_STAT_TRANSACTION,
198 BINDER_STAT_TRANSACTION_COMPLETE,
199 BINDER_STAT_COUNT
200 };
201
202 struct binder_stats {
203 atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
204 atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
205 atomic_t obj_created[BINDER_STAT_COUNT];
206 atomic_t obj_deleted[BINDER_STAT_COUNT];
207 };
208
209 static struct binder_stats binder_stats;
210
211 static inline void binder_stats_deleted(enum binder_stat_types type)
212 {
213 atomic_inc(&binder_stats.obj_deleted[type]);
214 }
215
216 static inline void binder_stats_created(enum binder_stat_types type)
217 {
218 atomic_inc(&binder_stats.obj_created[type]);
219 }
220
221 struct binder_transaction_log_entry {
222 int debug_id;
223 int debug_id_done;
224 int call_type;
225 int from_proc;
226 int from_thread;
227 int target_handle;
228 int to_proc;
229 int to_thread;
230 int to_node;
231 int data_size;
232 int offsets_size;
233 int return_error_line;
234 uint32_t return_error;
235 uint32_t return_error_param;
236 const char *context_name;
237 };
238 struct binder_transaction_log {
239 atomic_t cur;
240 bool full;
241 struct binder_transaction_log_entry entry[32];
242 };
243 static struct binder_transaction_log binder_transaction_log;
244 static struct binder_transaction_log binder_transaction_log_failed;
245
246 static struct binder_transaction_log_entry *binder_transaction_log_add(
247 struct binder_transaction_log *log)
248 {
249 struct binder_transaction_log_entry *e;
250 unsigned int cur = atomic_inc_return(&log->cur);
251
252 if (cur >= ARRAY_SIZE(log->entry))
253 log->full = 1;
254 e = &log->entry[cur % ARRAY_SIZE(log->entry)];
255 WRITE_ONCE(e->debug_id_done, 0);
256 /*
257 * write-barrier to synchronize access to e->debug_id_done.
258 * We make sure the initialized 0 value is seen before
259 * memset() other fields are zeroed by memset.
260 */
261 smp_wmb();
262 memset(e, 0, sizeof(*e));
263 return e;
264 }
265
266 struct binder_context {
267 struct binder_node *binder_context_mgr_node;
268 struct mutex context_mgr_node_lock;
269
270 kuid_t binder_context_mgr_uid;
271 const char *name;
272 };
273
274 struct binder_device {
275 struct hlist_node hlist;
276 struct miscdevice miscdev;
277 struct binder_context context;
278 };
279
280 /**
281 * struct binder_work - work enqueued on a worklist
282 * @entry: node enqueued on list
283 * @type: type of work to be performed
284 *
285 * There are separate work lists for proc, thread, and node (async).
286 */
287 struct binder_work {
288 struct list_head entry;
289
290 enum {
291 BINDER_WORK_TRANSACTION = 1,
292 BINDER_WORK_TRANSACTION_COMPLETE,
293 BINDER_WORK_RETURN_ERROR,
294 BINDER_WORK_NODE,
295 BINDER_WORK_DEAD_BINDER,
296 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
297 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
298 } type;
299 };
300
301 struct binder_error {
302 struct binder_work work;
303 uint32_t cmd;
304 };
305
306 /**
307 * struct binder_node - binder node bookkeeping
308 * @debug_id: unique ID for debugging
309 * (invariant after initialized)
310 * @lock: lock for node fields
311 * @work: worklist element for node work
312 * (protected by @proc->inner_lock)
313 * @rb_node: element for proc->nodes tree
314 * (protected by @proc->inner_lock)
315 * @dead_node: element for binder_dead_nodes list
316 * (protected by binder_dead_nodes_lock)
317 * @proc: binder_proc that owns this node
318 * (invariant after initialized)
319 * @refs: list of references on this node
320 * (protected by @lock)
321 * @internal_strong_refs: used to take strong references when
322 * initiating a transaction
323 * (protected by @proc->inner_lock if @proc
324 * and by @lock)
325 * @local_weak_refs: weak user refs from local process
326 * (protected by @proc->inner_lock if @proc
327 * and by @lock)
328 * @local_strong_refs: strong user refs from local process
329 * (protected by @proc->inner_lock if @proc
330 * and by @lock)
331 * @tmp_refs: temporary kernel refs
332 * (protected by @proc->inner_lock while @proc
333 * is valid, and by binder_dead_nodes_lock
334 * if @proc is NULL. During inc/dec and node release
335 * it is also protected by @lock to provide safety
336 * as the node dies and @proc becomes NULL)
337 * @ptr: userspace pointer for node
338 * (invariant, no lock needed)
339 * @cookie: userspace cookie for node
340 * (invariant, no lock needed)
341 * @has_strong_ref: userspace notified of strong ref
342 * (protected by @proc->inner_lock if @proc
343 * and by @lock)
344 * @pending_strong_ref: userspace has acked notification of strong ref
345 * (protected by @proc->inner_lock if @proc
346 * and by @lock)
347 * @has_weak_ref: userspace notified of weak ref
348 * (protected by @proc->inner_lock if @proc
349 * and by @lock)
350 * @pending_weak_ref: userspace has acked notification of weak ref
351 * (protected by @proc->inner_lock if @proc
352 * and by @lock)
353 * @has_async_transaction: async transaction to node in progress
354 * (protected by @lock)
355 * @sched_policy: minimum scheduling policy for node
356 * (invariant after initialized)
357 * @accept_fds: file descriptor operations supported for node
358 * (invariant after initialized)
359 * @min_priority: minimum scheduling priority
360 * (invariant after initialized)
361 * @inherit_rt: inherit RT scheduling policy from caller
362 * (invariant after initialized)
363 * @async_todo: list of async work items
364 * (protected by @proc->inner_lock)
365 *
366 * Bookkeeping structure for binder nodes.
367 */
368 struct binder_node {
369 int debug_id;
370 spinlock_t lock;
371 struct binder_work work;
372 union {
373 struct rb_node rb_node;
374 struct hlist_node dead_node;
375 };
376 struct binder_proc *proc;
377 struct hlist_head refs;
378 int internal_strong_refs;
379 int local_weak_refs;
380 int local_strong_refs;
381 int tmp_refs;
382 binder_uintptr_t ptr;
383 binder_uintptr_t cookie;
384 struct {
385 /*
386 * bitfield elements protected by
387 * proc inner_lock
388 */
389 u8 has_strong_ref:1;
390 u8 pending_strong_ref:1;
391 u8 has_weak_ref:1;
392 u8 pending_weak_ref:1;
393 };
394 struct {
395 /*
396 * invariant after initialization
397 */
398 u8 sched_policy:2;
399 u8 inherit_rt:1;
400 u8 accept_fds:1;
401 u8 min_priority;
402 };
403 bool has_async_transaction;
404 struct list_head async_todo;
405 };
406
407 struct binder_ref_death {
408 /**
409 * @work: worklist element for death notifications
410 * (protected by inner_lock of the proc that
411 * this ref belongs to)
412 */
413 struct binder_work work;
414 binder_uintptr_t cookie;
415 };
416
417 /**
418 * struct binder_ref_data - binder_ref counts and id
419 * @debug_id: unique ID for the ref
420 * @desc: unique userspace handle for ref
421 * @strong: strong ref count (debugging only if not locked)
422 * @weak: weak ref count (debugging only if not locked)
423 *
424 * Structure to hold ref count and ref id information. Since
425 * the actual ref can only be accessed with a lock, this structure
426 * is used to return information about the ref to callers of
427 * ref inc/dec functions.
428 */
429 struct binder_ref_data {
430 int debug_id;
431 uint32_t desc;
432 int strong;
433 int weak;
434 };
435
436 /**
437 * struct binder_ref - struct to track references on nodes
438 * @data: binder_ref_data containing id, handle, and current refcounts
439 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
440 * @rb_node_node: node for lookup by @node in proc's rb_tree
441 * @node_entry: list entry for node->refs list in target node
442 * (protected by @node->lock)
443 * @proc: binder_proc containing ref
444 * @node: binder_node of target node. When cleaning up a
445 * ref for deletion in binder_cleanup_ref, a non-NULL
446 * @node indicates the node must be freed
447 * @death: pointer to death notification (ref_death) if requested
448 * (protected by @node->lock)
449 *
450 * Structure to track references from procA to target node (on procB). This
451 * structure is unsafe to access without holding @proc->outer_lock.
452 */
453 struct binder_ref {
454 /* Lookups needed: */
455 /* node + proc => ref (transaction) */
456 /* desc + proc => ref (transaction, inc/dec ref) */
457 /* node => refs + procs (proc exit) */
458 struct binder_ref_data data;
459 struct rb_node rb_node_desc;
460 struct rb_node rb_node_node;
461 struct hlist_node node_entry;
462 struct binder_proc *proc;
463 struct binder_node *node;
464 struct binder_ref_death *death;
465 };
466
467 enum binder_deferred_state {
468 BINDER_DEFERRED_PUT_FILES = 0x01,
469 BINDER_DEFERRED_FLUSH = 0x02,
470 BINDER_DEFERRED_RELEASE = 0x04,
471 };
472
473 /**
474 * struct binder_priority - scheduler policy and priority
475 * @sched_policy scheduler policy
476 * @prio [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
477 *
478 * The binder driver supports inheriting the following scheduler policies:
479 * SCHED_NORMAL
480 * SCHED_BATCH
481 * SCHED_FIFO
482 * SCHED_RR
483 */
484 struct binder_priority {
485 unsigned int sched_policy;
486 int prio;
487 };
488
489 /**
490 * struct binder_proc - binder process bookkeeping
491 * @proc_node: element for binder_procs list
492 * @threads: rbtree of binder_threads in this proc
493 * (protected by @inner_lock)
494 * @nodes: rbtree of binder nodes associated with
495 * this proc ordered by node->ptr
496 * (protected by @inner_lock)
497 * @refs_by_desc: rbtree of refs ordered by ref->desc
498 * (protected by @outer_lock)
499 * @refs_by_node: rbtree of refs ordered by ref->node
500 * (protected by @outer_lock)
501 * @waiting_threads: threads currently waiting for proc work
502 * (protected by @inner_lock)
503 * @pid PID of group_leader of process
504 * (invariant after initialized)
505 * @tsk task_struct for group_leader of process
506 * (invariant after initialized)
507 * @files files_struct for process
508 * (protected by @files_lock)
509 * @files_lock mutex to protect @files
510 * @deferred_work_node: element for binder_deferred_list
511 * (protected by binder_deferred_lock)
512 * @deferred_work: bitmap of deferred work to perform
513 * (protected by binder_deferred_lock)
514 * @is_dead: process is dead and awaiting free
515 * when outstanding transactions are cleaned up
516 * (protected by @inner_lock)
517 * @todo: list of work for this process
518 * (protected by @inner_lock)
519 * @stats: per-process binder statistics
520 * (atomics, no lock needed)
521 * @delivered_death: list of delivered death notification
522 * (protected by @inner_lock)
523 * @max_threads: cap on number of binder threads
524 * (protected by @inner_lock)
525 * @requested_threads: number of binder threads requested but not
526 * yet started. In current implementation, can
527 * only be 0 or 1.
528 * (protected by @inner_lock)
529 * @requested_threads_started: number binder threads started
530 * (protected by @inner_lock)
531 * @tmp_ref: temporary reference to indicate proc is in use
532 * (protected by @inner_lock)
533 * @default_priority: default scheduler priority
534 * (invariant after initialized)
535 * @debugfs_entry: debugfs node
536 * @alloc: binder allocator bookkeeping
537 * @context: binder_context for this proc
538 * (invariant after initialized)
539 * @inner_lock: can nest under outer_lock and/or node lock
540 * @outer_lock: no nesting under innor or node lock
541 * Lock order: 1) outer, 2) node, 3) inner
542 *
543 * Bookkeeping structure for binder processes
544 */
545 struct binder_proc {
546 struct hlist_node proc_node;
547 struct rb_root threads;
548 struct rb_root nodes;
549 struct rb_root refs_by_desc;
550 struct rb_root refs_by_node;
551 struct list_head waiting_threads;
552 int pid;
553 struct task_struct *tsk;
554 struct files_struct *files;
555 struct mutex files_lock;
556 struct hlist_node deferred_work_node;
557 int deferred_work;
558 bool is_dead;
559
560 struct list_head todo;
561 struct binder_stats stats;
562 struct list_head delivered_death;
563 int max_threads;
564 int requested_threads;
565 int requested_threads_started;
566 int tmp_ref;
567 struct binder_priority default_priority;
568 struct dentry *debugfs_entry;
569 struct binder_alloc alloc;
570 struct binder_context *context;
571 spinlock_t inner_lock;
572 spinlock_t outer_lock;
573 };
574
575 enum {
576 BINDER_LOOPER_STATE_REGISTERED = 0x01,
577 BINDER_LOOPER_STATE_ENTERED = 0x02,
578 BINDER_LOOPER_STATE_EXITED = 0x04,
579 BINDER_LOOPER_STATE_INVALID = 0x08,
580 BINDER_LOOPER_STATE_WAITING = 0x10,
581 BINDER_LOOPER_STATE_POLL = 0x20,
582 };
583
584 /**
585 * struct binder_thread - binder thread bookkeeping
586 * @proc: binder process for this thread
587 * (invariant after initialization)
588 * @rb_node: element for proc->threads rbtree
589 * (protected by @proc->inner_lock)
590 * @waiting_thread_node: element for @proc->waiting_threads list
591 * (protected by @proc->inner_lock)
592 * @pid: PID for this thread
593 * (invariant after initialization)
594 * @looper: bitmap of looping state
595 * (only accessed by this thread)
596 * @looper_needs_return: looping thread needs to exit driver
597 * (no lock needed)
598 * @transaction_stack: stack of in-progress transactions for this thread
599 * (protected by @proc->inner_lock)
600 * @todo: list of work to do for this thread
601 * (protected by @proc->inner_lock)
602 * @process_todo: whether work in @todo should be processed
603 * (protected by @proc->inner_lock)
604 * @return_error: transaction errors reported by this thread
605 * (only accessed by this thread)
606 * @reply_error: transaction errors reported by target thread
607 * (protected by @proc->inner_lock)
608 * @wait: wait queue for thread work
609 * @stats: per-thread statistics
610 * (atomics, no lock needed)
611 * @tmp_ref: temporary reference to indicate thread is in use
612 * (atomic since @proc->inner_lock cannot
613 * always be acquired)
614 * @is_dead: thread is dead and awaiting free
615 * when outstanding transactions are cleaned up
616 * (protected by @proc->inner_lock)
617 * @task: struct task_struct for this thread
618 *
619 * Bookkeeping structure for binder threads.
620 */
621 struct binder_thread {
622 struct binder_proc *proc;
623 struct rb_node rb_node;
624 struct list_head waiting_thread_node;
625 int pid;
626 int looper; /* only modified by this thread */
627 bool looper_need_return; /* can be written by other thread */
628 struct binder_transaction *transaction_stack;
629 struct list_head todo;
630 bool process_todo;
631 struct binder_error return_error;
632 struct binder_error reply_error;
633 wait_queue_head_t wait;
634 struct binder_stats stats;
635 atomic_t tmp_ref;
636 bool is_dead;
637 struct task_struct *task;
638 };
639
640 struct binder_transaction {
641 int debug_id;
642 struct binder_work work;
643 struct binder_thread *from;
644 struct binder_transaction *from_parent;
645 struct binder_proc *to_proc;
646 struct binder_thread *to_thread;
647 struct binder_transaction *to_parent;
648 unsigned need_reply:1;
649 /* unsigned is_dead:1; */ /* not used at the moment */
650
651 struct binder_buffer *buffer;
652 unsigned int code;
653 unsigned int flags;
654 struct binder_priority priority;
655 struct binder_priority saved_priority;
656 bool set_priority_called;
657 kuid_t sender_euid;
658 /**
659 * @lock: protects @from, @to_proc, and @to_thread
660 *
661 * @from, @to_proc, and @to_thread can be set to NULL
662 * during thread teardown
663 */
664 spinlock_t lock;
665 };
666
667 /**
668 * binder_proc_lock() - Acquire outer lock for given binder_proc
669 * @proc: struct binder_proc to acquire
670 *
671 * Acquires proc->outer_lock. Used to protect binder_ref
672 * structures associated with the given proc.
673 */
674 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
675 static void
676 _binder_proc_lock(struct binder_proc *proc, int line)
677 {
678 binder_debug(BINDER_DEBUG_SPINLOCKS,
679 "%s: line=%d\n", __func__, line);
680 spin_lock(&proc->outer_lock);
681 }
682
683 /**
684 * binder_proc_unlock() - Release spinlock for given binder_proc
685 * @proc: struct binder_proc to acquire
686 *
687 * Release lock acquired via binder_proc_lock()
688 */
689 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
690 static void
691 _binder_proc_unlock(struct binder_proc *proc, int line)
692 {
693 binder_debug(BINDER_DEBUG_SPINLOCKS,
694 "%s: line=%d\n", __func__, line);
695 spin_unlock(&proc->outer_lock);
696 }
697
698 /**
699 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
700 * @proc: struct binder_proc to acquire
701 *
702 * Acquires proc->inner_lock. Used to protect todo lists
703 */
704 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
705 static void
706 _binder_inner_proc_lock(struct binder_proc *proc, int line)
707 {
708 binder_debug(BINDER_DEBUG_SPINLOCKS,
709 "%s: line=%d\n", __func__, line);
710 spin_lock(&proc->inner_lock);
711 }
712
713 /**
714 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
715 * @proc: struct binder_proc to acquire
716 *
717 * Release lock acquired via binder_inner_proc_lock()
718 */
719 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
720 static void
721 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
722 {
723 binder_debug(BINDER_DEBUG_SPINLOCKS,
724 "%s: line=%d\n", __func__, line);
725 spin_unlock(&proc->inner_lock);
726 }
727
728 /**
729 * binder_node_lock() - Acquire spinlock for given binder_node
730 * @node: struct binder_node to acquire
731 *
732 * Acquires node->lock. Used to protect binder_node fields
733 */
734 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
735 static void
736 _binder_node_lock(struct binder_node *node, int line)
737 {
738 binder_debug(BINDER_DEBUG_SPINLOCKS,
739 "%s: line=%d\n", __func__, line);
740 spin_lock(&node->lock);
741 }
742
743 /**
744 * binder_node_unlock() - Release spinlock for given binder_proc
745 * @node: struct binder_node to acquire
746 *
747 * Release lock acquired via binder_node_lock()
748 */
749 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
750 static void
751 _binder_node_unlock(struct binder_node *node, int line)
752 {
753 binder_debug(BINDER_DEBUG_SPINLOCKS,
754 "%s: line=%d\n", __func__, line);
755 spin_unlock(&node->lock);
756 }
757
758 /**
759 * binder_node_inner_lock() - Acquire node and inner locks
760 * @node: struct binder_node to acquire
761 *
762 * Acquires node->lock. If node->proc also acquires
763 * proc->inner_lock. Used to protect binder_node fields
764 */
765 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
766 static void
767 _binder_node_inner_lock(struct binder_node *node, int line)
768 {
769 binder_debug(BINDER_DEBUG_SPINLOCKS,
770 "%s: line=%d\n", __func__, line);
771 spin_lock(&node->lock);
772 if (node->proc)
773 binder_inner_proc_lock(node->proc);
774 }
775
776 /**
777 * binder_node_unlock() - Release node and inner locks
778 * @node: struct binder_node to acquire
779 *
780 * Release lock acquired via binder_node_lock()
781 */
782 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
783 static void
784 _binder_node_inner_unlock(struct binder_node *node, int line)
785 {
786 struct binder_proc *proc = node->proc;
787
788 binder_debug(BINDER_DEBUG_SPINLOCKS,
789 "%s: line=%d\n", __func__, line);
790 if (proc)
791 binder_inner_proc_unlock(proc);
792 spin_unlock(&node->lock);
793 }
794
795 #ifdef CONFIG_DEBUG_SNAPSHOT_BINDER
796 /*
797 * Binder Debug Snapshot
798 */
799 static void init_binder_transaction_base(int type, struct trace_binder_transaction_base *base,
800 struct binder_transaction *t, struct binder_thread *from,
801 struct binder_thread *to)
802 {
803 struct binder_thread *t_from;
804 struct binder_thread *t_to;
805
806 if (base == NULL)
807 return;
808
809 t_from = t->from ? t->from : (from ? from : NULL);
810 t_to = t->to_thread ? t->to_thread : (to ? to : NULL);
811 base->trace_type = type;
812 base->transaction_id = t->debug_id;
813 base->from_pid = t_from ? t_from->proc->pid : 0;
814 base->from_tid = t_from ? t_from->pid : 0;
815 base->to_pid = t->to_proc ? t->to_proc->pid : 0;
816 base->to_tid = t_to ? t_to->pid : 0;
817 if (t_from) {
818 strncpy(base->from_pid_comm, t_from->proc->tsk->comm, TASK_COMM_LEN);
819 strncpy(base->from_tid_comm, t_from->task->comm, TASK_COMM_LEN);
820 } else {
821 base->from_pid_comm[0] = '\0';
822 base->from_tid_comm[0] = '\0';
823 }
824 if (t->to_proc)
825 strncpy(base->to_pid_comm, t->to_proc->tsk->comm, TASK_COMM_LEN);
826 else
827 base->to_pid_comm[0] = '\0';
828 if (t_to)
829 strncpy(base->to_tid_comm, t_to->task->comm, TASK_COMM_LEN);
830 else
831 base->to_tid_comm[0] = '\0';
832 }
833
834 static void dss_binder_transaction(int reply, struct binder_transaction *t, struct binder_thread *from, int to_node_id)
835 {
836 struct trace_binder_transaction_base base;
837 struct trace_binder_transaction transaction;
838
839 init_binder_transaction_base(TRANSACTION, &base, t, from, NULL);
840 transaction.to_node_id = to_node_id;
841 transaction.reply = reply;
842 transaction.flags = t->flags;
843 transaction.code = t->code;
844
845 dbg_snapshot_binder(&base, &transaction, NULL);
846 }
847
848 static void dss_binder_transaction_received(struct binder_transaction *t, struct binder_thread *to)
849 {
850 struct trace_binder_transaction_base base;
851
852 init_binder_transaction_base(TRANSACTION_DONE, &base, t, NULL, to);
853
854 dbg_snapshot_binder(&base, NULL, NULL);
855 }
856
857 static void dss_binder_transaction_failed(int reply, struct binder_transaction_log_entry *e,
858 char *from_pid_comm, char *from_tid_comm,
859 unsigned int flags, unsigned int code)
860 {
861 struct trace_binder_transaction_base base;
862 struct trace_binder_transaction transaction;
863 struct trace_binder_transaction_error error;
864
865 base.trace_type = TRANSACTION_ERROR;
866 base.transaction_id = e->debug_id;
867 base.from_pid = e->from_proc;
868 base.from_tid = e->from_thread;
869 base.to_pid = e->to_proc;
870 base.to_tid = e->to_thread;
871 strncpy(base.from_pid_comm, from_pid_comm, TASK_COMM_LEN);
872 strncpy(base.from_tid_comm, from_tid_comm, TASK_COMM_LEN);
873 base.to_pid_comm[0] = '\0';
874 base.to_tid_comm[0] = '\0';
875 transaction.to_node_id = e->to_node;
876 transaction.reply = reply;
877 transaction.flags = flags;
878 transaction.code = code;
879 error.return_error = e->return_error;
880 error.return_error_param = e->return_error_param;
881 error.return_error_line = e->return_error_line;
882
883 dbg_snapshot_binder(&base, &transaction, &error);
884 }
885 #endif /* CONFIG_DEBUG_SNAPSHOT_BINDER */
886
887 static bool binder_worklist_empty_ilocked(struct list_head *list)
888 {
889 return list_empty(list);
890 }
891
892 /**
893 * binder_worklist_empty() - Check if no items on the work list
894 * @proc: binder_proc associated with list
895 * @list: list to check
896 *
897 * Return: true if there are no items on list, else false
898 */
899 static bool binder_worklist_empty(struct binder_proc *proc,
900 struct list_head *list)
901 {
902 bool ret;
903
904 binder_inner_proc_lock(proc);
905 ret = binder_worklist_empty_ilocked(list);
906 binder_inner_proc_unlock(proc);
907 return ret;
908 }
909
910 /**
911 * binder_enqueue_work_ilocked() - Add an item to the work list
912 * @work: struct binder_work to add to list
913 * @target_list: list to add work to
914 *
915 * Adds the work to the specified list. Asserts that work
916 * is not already on a list.
917 *
918 * Requires the proc->inner_lock to be held.
919 */
920 static void
921 binder_enqueue_work_ilocked(struct binder_work *work,
922 struct list_head *target_list)
923 {
924 BUG_ON(target_list == NULL);
925 BUG_ON(work->entry.next && !list_empty(&work->entry));
926 list_add_tail(&work->entry, target_list);
927 }
928
929 /**
930 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
931 * @thread: thread to queue work to
932 * @work: struct binder_work to add to list
933 *
934 * Adds the work to the todo list of the thread. Doesn't set the process_todo
935 * flag, which means that (if it wasn't already set) the thread will go to
936 * sleep without handling this work when it calls read.
937 *
938 * Requires the proc->inner_lock to be held.
939 */
940 static void
941 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
942 struct binder_work *work)
943 {
944 binder_enqueue_work_ilocked(work, &thread->todo);
945 }
946
947 /**
948 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
949 * @thread: thread to queue work to
950 * @work: struct binder_work to add to list
951 *
952 * Adds the work to the todo list of the thread, and enables processing
953 * of the todo queue.
954 *
955 * Requires the proc->inner_lock to be held.
956 */
957 static void
958 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
959 struct binder_work *work)
960 {
961 binder_enqueue_work_ilocked(work, &thread->todo);
962 thread->process_todo = true;
963 }
964
965 /**
966 * binder_enqueue_thread_work() - Add an item to the thread work list
967 * @thread: thread to queue work to
968 * @work: struct binder_work to add to list
969 *
970 * Adds the work to the todo list of the thread, and enables processing
971 * of the todo queue.
972 */
973 static void
974 binder_enqueue_thread_work(struct binder_thread *thread,
975 struct binder_work *work)
976 {
977 binder_inner_proc_lock(thread->proc);
978 binder_enqueue_thread_work_ilocked(thread, work);
979 binder_inner_proc_unlock(thread->proc);
980 }
981
982 static void
983 binder_dequeue_work_ilocked(struct binder_work *work)
984 {
985 list_del_init(&work->entry);
986 }
987
988 /**
989 * binder_dequeue_work() - Removes an item from the work list
990 * @proc: binder_proc associated with list
991 * @work: struct binder_work to remove from list
992 *
993 * Removes the specified work item from whatever list it is on.
994 * Can safely be called if work is not on any list.
995 */
996 static void
997 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
998 {
999 binder_inner_proc_lock(proc);
1000 binder_dequeue_work_ilocked(work);
1001 binder_inner_proc_unlock(proc);
1002 }
1003
1004 static struct binder_work *binder_dequeue_work_head_ilocked(
1005 struct list_head *list)
1006 {
1007 struct binder_work *w;
1008
1009 w = list_first_entry_or_null(list, struct binder_work, entry);
1010 if (w)
1011 list_del_init(&w->entry);
1012 return w;
1013 }
1014
1015 /**
1016 * binder_dequeue_work_head() - Dequeues the item at head of list
1017 * @proc: binder_proc associated with list
1018 * @list: list to dequeue head
1019 *
1020 * Removes the head of the list if there are items on the list
1021 *
1022 * Return: pointer dequeued binder_work, NULL if list was empty
1023 */
1024 static struct binder_work *binder_dequeue_work_head(
1025 struct binder_proc *proc,
1026 struct list_head *list)
1027 {
1028 struct binder_work *w;
1029
1030 binder_inner_proc_lock(proc);
1031 w = binder_dequeue_work_head_ilocked(list);
1032 binder_inner_proc_unlock(proc);
1033 return w;
1034 }
1035
1036 static void
1037 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
1038 static void binder_free_thread(struct binder_thread *thread);
1039 static void binder_free_proc(struct binder_proc *proc);
1040 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
1041
1042 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
1043 {
1044 unsigned long rlim_cur;
1045 unsigned long irqs;
1046 int ret;
1047
1048 mutex_lock(&proc->files_lock);
1049 if (proc->files == NULL) {
1050 ret = -ESRCH;
1051 goto err;
1052 }
1053 if (!lock_task_sighand(proc->tsk, &irqs)) {
1054 ret = -EMFILE;
1055 goto err;
1056 }
1057 rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
1058 unlock_task_sighand(proc->tsk, &irqs);
1059
1060 ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
1061 err:
1062 mutex_unlock(&proc->files_lock);
1063 return ret;
1064 }
1065
1066 /*
1067 * copied from fd_install
1068 */
1069 static void task_fd_install(
1070 struct binder_proc *proc, unsigned int fd, struct file *file)
1071 {
1072 mutex_lock(&proc->files_lock);
1073 if (proc->files)
1074 __fd_install(proc->files, fd, file);
1075 mutex_unlock(&proc->files_lock);
1076 }
1077
1078 /*
1079 * copied from sys_close
1080 */
1081 static long task_close_fd(struct binder_proc *proc, unsigned int fd)
1082 {
1083 int retval;
1084
1085 mutex_lock(&proc->files_lock);
1086 if (proc->files == NULL) {
1087 retval = -ESRCH;
1088 goto err;
1089 }
1090 retval = __close_fd(proc->files, fd);
1091 /* can't restart close syscall because file table entry was cleared */
1092 if (unlikely(retval == -ERESTARTSYS ||
1093 retval == -ERESTARTNOINTR ||
1094 retval == -ERESTARTNOHAND ||
1095 retval == -ERESTART_RESTARTBLOCK))
1096 retval = -EINTR;
1097 err:
1098 mutex_unlock(&proc->files_lock);
1099 return retval;
1100 }
1101
1102 static bool binder_has_work_ilocked(struct binder_thread *thread,
1103 bool do_proc_work)
1104 {
1105 return thread->process_todo ||
1106 thread->looper_need_return ||
1107 (do_proc_work &&
1108 !binder_worklist_empty_ilocked(&thread->proc->todo));
1109 }
1110
1111 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
1112 {
1113 bool has_work;
1114
1115 binder_inner_proc_lock(thread->proc);
1116 has_work = binder_has_work_ilocked(thread, do_proc_work);
1117 binder_inner_proc_unlock(thread->proc);
1118
1119 return has_work;
1120 }
1121
1122 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
1123 {
1124 return !thread->transaction_stack &&
1125 binder_worklist_empty_ilocked(&thread->todo) &&
1126 (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
1127 BINDER_LOOPER_STATE_REGISTERED));
1128 }
1129
1130 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
1131 bool sync)
1132 {
1133 struct rb_node *n;
1134 struct binder_thread *thread;
1135
1136 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
1137 thread = rb_entry(n, struct binder_thread, rb_node);
1138 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
1139 binder_available_for_proc_work_ilocked(thread)) {
1140 if (sync)
1141 wake_up_interruptible_sync(&thread->wait);
1142 else
1143 wake_up_interruptible(&thread->wait);
1144 }
1145 }
1146 }
1147
1148 /**
1149 * binder_select_thread_ilocked() - selects a thread for doing proc work.
1150 * @proc: process to select a thread from
1151 *
1152 * Note that calling this function moves the thread off the waiting_threads
1153 * list, so it can only be woken up by the caller of this function, or a
1154 * signal. Therefore, callers *should* always wake up the thread this function
1155 * returns.
1156 *
1157 * Return: If there's a thread currently waiting for process work,
1158 * returns that thread. Otherwise returns NULL.
1159 */
1160 static struct binder_thread *
1161 binder_select_thread_ilocked(struct binder_proc *proc)
1162 {
1163 struct binder_thread *thread;
1164
1165 assert_spin_locked(&proc->inner_lock);
1166 thread = list_first_entry_or_null(&proc->waiting_threads,
1167 struct binder_thread,
1168 waiting_thread_node);
1169
1170 if (thread)
1171 list_del_init(&thread->waiting_thread_node);
1172
1173 return thread;
1174 }
1175
1176 /**
1177 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
1178 * @proc: process to wake up a thread in
1179 * @thread: specific thread to wake-up (may be NULL)
1180 * @sync: whether to do a synchronous wake-up
1181 *
1182 * This function wakes up a thread in the @proc process.
1183 * The caller may provide a specific thread to wake-up in
1184 * the @thread parameter. If @thread is NULL, this function
1185 * will wake up threads that have called poll().
1186 *
1187 * Note that for this function to work as expected, callers
1188 * should first call binder_select_thread() to find a thread
1189 * to handle the work (if they don't have a thread already),
1190 * and pass the result into the @thread parameter.
1191 */
1192 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
1193 struct binder_thread *thread,
1194 bool sync)
1195 {
1196 assert_spin_locked(&proc->inner_lock);
1197
1198 if (thread) {
1199 if (sync)
1200 wake_up_interruptible_sync(&thread->wait);
1201 else
1202 wake_up_interruptible(&thread->wait);
1203 return;
1204 }
1205
1206 /* Didn't find a thread waiting for proc work; this can happen
1207 * in two scenarios:
1208 * 1. All threads are busy handling transactions
1209 * In that case, one of those threads should call back into
1210 * the kernel driver soon and pick up this work.
1211 * 2. Threads are using the (e)poll interface, in which case
1212 * they may be blocked on the waitqueue without having been
1213 * added to waiting_threads. For this case, we just iterate
1214 * over all threads not handling transaction work, and
1215 * wake them all up. We wake all because we don't know whether
1216 * a thread that called into (e)poll is handling non-binder
1217 * work currently.
1218 */
1219 binder_wakeup_poll_threads_ilocked(proc, sync);
1220 }
1221
1222 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1223 {
1224 struct binder_thread *thread = binder_select_thread_ilocked(proc);
1225
1226 binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
1227 }
1228
1229 static bool is_rt_policy(int policy)
1230 {
1231 return policy == SCHED_FIFO || policy == SCHED_RR;
1232 }
1233
1234 static bool is_fair_policy(int policy)
1235 {
1236 return policy == SCHED_NORMAL || policy == SCHED_BATCH;
1237 }
1238
1239 static bool binder_supported_policy(int policy)
1240 {
1241 return is_fair_policy(policy) || is_rt_policy(policy);
1242 }
1243
1244 static int to_userspace_prio(int policy, int kernel_priority)
1245 {
1246 if (is_fair_policy(policy))
1247 return PRIO_TO_NICE(kernel_priority);
1248 else
1249 return MAX_USER_RT_PRIO - 1 - kernel_priority;
1250 }
1251
1252 static int to_kernel_prio(int policy, int user_priority)
1253 {
1254 if (is_fair_policy(policy))
1255 return NICE_TO_PRIO(user_priority);
1256 else
1257 return MAX_USER_RT_PRIO - 1 - user_priority;
1258 }
1259
1260 static void binder_do_set_priority(struct task_struct *task,
1261 struct binder_priority desired,
1262 bool verify)
1263 {
1264 int priority; /* user-space prio value */
1265 bool has_cap_nice;
1266 unsigned int policy = desired.sched_policy;
1267
1268 if (task->policy == policy && task->normal_prio == desired.prio)
1269 return;
1270
1271 has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);
1272
1273 priority = to_userspace_prio(policy, desired.prio);
1274
1275 if (verify && is_rt_policy(policy) && !has_cap_nice) {
1276 long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);
1277
1278 if (max_rtprio == 0) {
1279 policy = SCHED_NORMAL;
1280 priority = MIN_NICE;
1281 } else if (priority > max_rtprio) {
1282 priority = max_rtprio;
1283 }
1284 }
1285
1286 if (verify && is_fair_policy(policy) && !has_cap_nice) {
1287 long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));
1288
1289 if (min_nice > MAX_NICE) {
1290 binder_user_error("%d(%s) RLIMIT_NICE not set\n",
1291 task->pid, task->comm);
1292 return;
1293 } else if (priority < min_nice) {
1294 priority = min_nice;
1295 }
1296 }
1297
1298 if (policy != desired.sched_policy ||
1299 to_kernel_prio(policy, priority) != desired.prio)
1300 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
1301 "%d(%s): priority %d not allowed, using %d instead\n",
1302 task->pid, task->comm, desired.prio,
1303 to_kernel_prio(policy, priority));
1304
1305 trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
1306 to_kernel_prio(policy, priority),
1307 desired.prio);
1308
1309 /* Set the actual priority */
1310 if (task->policy != policy || is_rt_policy(policy)) {
1311 struct sched_param params;
1312
1313 params.sched_priority = is_rt_policy(policy) ? priority : 0;
1314
1315 sched_setscheduler_nocheck(task,
1316 policy | SCHED_RESET_ON_FORK,
1317 &params);
1318 }
1319 if (is_fair_policy(policy))
1320 set_user_nice(task, priority);
1321 }
1322
1323 static void binder_set_priority(struct task_struct *task,
1324 struct binder_priority desired)
1325 {
1326 binder_do_set_priority(task, desired, /* verify = */ true);
1327 }
1328
1329 static void binder_restore_priority(struct task_struct *task,
1330 struct binder_priority desired)
1331 {
1332 binder_do_set_priority(task, desired, /* verify = */ false);
1333 }
1334
1335 static void binder_transaction_priority(struct task_struct *task,
1336 struct binder_transaction *t,
1337 struct binder_priority node_prio,
1338 bool inherit_rt)
1339 {
1340 struct binder_priority desired_prio = t->priority;
1341
1342 if (t->set_priority_called)
1343 return;
1344
1345 t->set_priority_called = true;
1346 t->saved_priority.sched_policy = task->policy;
1347 t->saved_priority.prio = task->normal_prio;
1348
1349 if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
1350 desired_prio.prio = NICE_TO_PRIO(0);
1351 desired_prio.sched_policy = SCHED_NORMAL;
1352 }
1353
1354 if (node_prio.prio < t->priority.prio ||
1355 (node_prio.prio == t->priority.prio &&
1356 node_prio.sched_policy == SCHED_FIFO)) {
1357 /*
1358 * In case the minimum priority on the node is
1359 * higher (lower value), use that priority. If
1360 * the priority is the same, but the node uses
1361 * SCHED_FIFO, prefer SCHED_FIFO, since it can
1362 * run unbounded, unlike SCHED_RR.
1363 */
1364 desired_prio = node_prio;
1365 }
1366
1367 binder_set_priority(task, desired_prio);
1368 }
1369
1370 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1371 binder_uintptr_t ptr)
1372 {
1373 struct rb_node *n = proc->nodes.rb_node;
1374 struct binder_node *node;
1375
1376 assert_spin_locked(&proc->inner_lock);
1377
1378 while (n) {
1379 node = rb_entry(n, struct binder_node, rb_node);
1380
1381 if (ptr < node->ptr)
1382 n = n->rb_left;
1383 else if (ptr > node->ptr)
1384 n = n->rb_right;
1385 else {
1386 /*
1387 * take an implicit weak reference
1388 * to ensure node stays alive until
1389 * call to binder_put_node()
1390 */
1391 binder_inc_node_tmpref_ilocked(node);
1392 return node;
1393 }
1394 }
1395 return NULL;
1396 }
1397
1398 static struct binder_node *binder_get_node(struct binder_proc *proc,
1399 binder_uintptr_t ptr)
1400 {
1401 struct binder_node *node;
1402
1403 binder_inner_proc_lock(proc);
1404 node = binder_get_node_ilocked(proc, ptr);
1405 binder_inner_proc_unlock(proc);
1406 return node;
1407 }
1408
1409 static struct binder_node *binder_init_node_ilocked(
1410 struct binder_proc *proc,
1411 struct binder_node *new_node,
1412 struct flat_binder_object *fp)
1413 {
1414 struct rb_node **p = &proc->nodes.rb_node;
1415 struct rb_node *parent = NULL;
1416 struct binder_node *node;
1417 binder_uintptr_t ptr = fp ? fp->binder : 0;
1418 binder_uintptr_t cookie = fp ? fp->cookie : 0;
1419 __u32 flags = fp ? fp->flags : 0;
1420 s8 priority;
1421
1422 assert_spin_locked(&proc->inner_lock);
1423
1424 while (*p) {
1425
1426 parent = *p;
1427 node = rb_entry(parent, struct binder_node, rb_node);
1428
1429 if (ptr < node->ptr)
1430 p = &(*p)->rb_left;
1431 else if (ptr > node->ptr)
1432 p = &(*p)->rb_right;
1433 else {
1434 /*
1435 * A matching node is already in
1436 * the rb tree. Abandon the init
1437 * and return it.
1438 */
1439 binder_inc_node_tmpref_ilocked(node);
1440 return node;
1441 }
1442 }
1443 node = new_node;
1444 binder_stats_created(BINDER_STAT_NODE);
1445 node->tmp_refs++;
1446 rb_link_node(&node->rb_node, parent, p);
1447 rb_insert_color(&node->rb_node, &proc->nodes);
1448 node->debug_id = atomic_inc_return(&binder_last_id);
1449 node->proc = proc;
1450 node->ptr = ptr;
1451 node->cookie = cookie;
1452 node->work.type = BINDER_WORK_NODE;
1453 priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1454 node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
1455 FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
1456 node->min_priority = to_kernel_prio(node->sched_policy, priority);
1457 node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1458 node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
1459 spin_lock_init(&node->lock);
1460 INIT_LIST_HEAD(&node->work.entry);
1461 INIT_LIST_HEAD(&node->async_todo);
1462 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1463 "%d:%d(%s:%s) node %d u%016llx c%016llx created\n",
1464 proc->pid, current->pid, proc->tsk->comm, current->comm, node->debug_id,
1465 (u64)node->ptr, (u64)node->cookie);
1466
1467 return node;
1468 }
1469
1470 static struct binder_node *binder_new_node(struct binder_proc *proc,
1471 struct flat_binder_object *fp)
1472 {
1473 struct binder_node *node;
1474 struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1475
1476 if (!new_node)
1477 return NULL;
1478 binder_inner_proc_lock(proc);
1479 node = binder_init_node_ilocked(proc, new_node, fp);
1480 binder_inner_proc_unlock(proc);
1481 if (node != new_node)
1482 /*
1483 * The node was already added by another thread
1484 */
1485 kfree(new_node);
1486
1487 return node;
1488 }
1489
1490 static void binder_free_node(struct binder_node *node)
1491 {
1492 kfree(node);
1493 binder_stats_deleted(BINDER_STAT_NODE);
1494 }
1495
1496 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1497 int internal,
1498 struct list_head *target_list)
1499 {
1500 struct binder_proc *proc = node->proc;
1501
1502 assert_spin_locked(&node->lock);
1503 if (proc)
1504 assert_spin_locked(&proc->inner_lock);
1505 if (strong) {
1506 if (internal) {
1507 if (target_list == NULL &&
1508 node->internal_strong_refs == 0 &&
1509 !(node->proc &&
1510 node == node->proc->context->binder_context_mgr_node &&
1511 node->has_strong_ref)) {
1512 pr_err("invalid inc strong node for %d\n",
1513 node->debug_id);
1514 return -EINVAL;
1515 }
1516 node->internal_strong_refs++;
1517 } else
1518 node->local_strong_refs++;
1519 if (!node->has_strong_ref && target_list) {
1520 binder_dequeue_work_ilocked(&node->work);
1521 /*
1522 * Note: this function is the only place where we queue
1523 * directly to a thread->todo without using the
1524 * corresponding binder_enqueue_thread_work() helper
1525 * functions; in this case it's ok to not set the
1526 * process_todo flag, since we know this node work will
1527 * always be followed by other work that starts queue
1528 * processing: in case of synchronous transactions, a
1529 * BR_REPLY or BR_ERROR; in case of oneway
1530 * transactions, a BR_TRANSACTION_COMPLETE.
1531 */
1532 binder_enqueue_work_ilocked(&node->work, target_list);
1533 }
1534 } else {
1535 if (!internal)
1536 node->local_weak_refs++;
1537 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1538 if (target_list == NULL) {
1539 pr_err("invalid inc weak node for %d\n",
1540 node->debug_id);
1541 return -EINVAL;
1542 }
1543 /*
1544 * See comment above
1545 */
1546 binder_enqueue_work_ilocked(&node->work, target_list);
1547 }
1548 }
1549 return 0;
1550 }
1551
1552 static int binder_inc_node(struct binder_node *node, int strong, int internal,
1553 struct list_head *target_list)
1554 {
1555 int ret;
1556
1557 binder_node_inner_lock(node);
1558 ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1559 binder_node_inner_unlock(node);
1560
1561 return ret;
1562 }
1563
1564 static bool binder_dec_node_nilocked(struct binder_node *node,
1565 int strong, int internal)
1566 {
1567 struct binder_proc *proc = node->proc;
1568
1569 assert_spin_locked(&node->lock);
1570 if (proc)
1571 assert_spin_locked(&proc->inner_lock);
1572 if (strong) {
1573 if (internal)
1574 node->internal_strong_refs--;
1575 else
1576 node->local_strong_refs--;
1577 if (node->local_strong_refs || node->internal_strong_refs)
1578 return false;
1579 } else {
1580 if (!internal)
1581 node->local_weak_refs--;
1582 if (node->local_weak_refs || node->tmp_refs ||
1583 !hlist_empty(&node->refs))
1584 return false;
1585 }
1586
1587 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1588 if (list_empty(&node->work.entry)) {
1589 binder_enqueue_work_ilocked(&node->work, &proc->todo);
1590 binder_wakeup_proc_ilocked(proc);
1591 }
1592 } else {
1593 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1594 !node->local_weak_refs && !node->tmp_refs) {
1595 if (proc) {
1596 binder_dequeue_work_ilocked(&node->work);
1597 rb_erase(&node->rb_node, &proc->nodes);
1598 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1599 "refless node %d deleted\n",
1600 node->debug_id);
1601 } else {
1602 BUG_ON(!list_empty(&node->work.entry));
1603 spin_lock(&binder_dead_nodes_lock);
1604 /*
1605 * tmp_refs could have changed so
1606 * check it again
1607 */
1608 if (node->tmp_refs) {
1609 spin_unlock(&binder_dead_nodes_lock);
1610 return false;
1611 }
1612 hlist_del(&node->dead_node);
1613 spin_unlock(&binder_dead_nodes_lock);
1614 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1615 "dead node %d deleted\n",
1616 node->debug_id);
1617 }
1618 return true;
1619 }
1620 }
1621 return false;
1622 }
1623
1624 static void binder_dec_node(struct binder_node *node, int strong, int internal)
1625 {
1626 bool free_node;
1627
1628 binder_node_inner_lock(node);
1629 free_node = binder_dec_node_nilocked(node, strong, internal);
1630 binder_node_inner_unlock(node);
1631 if (free_node)
1632 binder_free_node(node);
1633 }
1634
1635 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
1636 {
1637 /*
1638 * No call to binder_inc_node() is needed since we
1639 * don't need to inform userspace of any changes to
1640 * tmp_refs
1641 */
1642 node->tmp_refs++;
1643 }
1644
1645 /**
1646 * binder_inc_node_tmpref() - take a temporary reference on node
1647 * @node: node to reference
1648 *
1649 * Take reference on node to prevent the node from being freed
1650 * while referenced only by a local variable. The inner lock is
1651 * needed to serialize with the node work on the queue (which
1652 * isn't needed after the node is dead). If the node is dead
1653 * (node->proc is NULL), use binder_dead_nodes_lock to protect
1654 * node->tmp_refs against dead-node-only cases where the node
1655 * lock cannot be acquired (eg traversing the dead node list to
1656 * print nodes)
1657 */
1658 static void binder_inc_node_tmpref(struct binder_node *node)
1659 {
1660 binder_node_lock(node);
1661 if (node->proc)
1662 binder_inner_proc_lock(node->proc);
1663 else
1664 spin_lock(&binder_dead_nodes_lock);
1665 binder_inc_node_tmpref_ilocked(node);
1666 if (node->proc)
1667 binder_inner_proc_unlock(node->proc);
1668 else
1669 spin_unlock(&binder_dead_nodes_lock);
1670 binder_node_unlock(node);
1671 }
1672
1673 /**
1674 * binder_dec_node_tmpref() - remove a temporary reference on node
1675 * @node: node to reference
1676 *
1677 * Release temporary reference on node taken via binder_inc_node_tmpref()
1678 */
1679 static void binder_dec_node_tmpref(struct binder_node *node)
1680 {
1681 bool free_node;
1682
1683 binder_node_inner_lock(node);
1684 if (!node->proc)
1685 spin_lock(&binder_dead_nodes_lock);
1686 node->tmp_refs--;
1687 BUG_ON(node->tmp_refs < 0);
1688 if (!node->proc)
1689 spin_unlock(&binder_dead_nodes_lock);
1690 /*
1691 * Call binder_dec_node() to check if all refcounts are 0
1692 * and cleanup is needed. Calling with strong=0 and internal=1
1693 * causes no actual reference to be released in binder_dec_node().
1694 * If that changes, a change is needed here too.
1695 */
1696 free_node = binder_dec_node_nilocked(node, 0, 1);
1697 binder_node_inner_unlock(node);
1698 if (free_node)
1699 binder_free_node(node);
1700 }
1701
1702 static void binder_put_node(struct binder_node *node)
1703 {
1704 binder_dec_node_tmpref(node);
1705 }
1706
1707 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1708 u32 desc, bool need_strong_ref)
1709 {
1710 struct rb_node *n = proc->refs_by_desc.rb_node;
1711 struct binder_ref *ref;
1712
1713 while (n) {
1714 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1715
1716 if (desc < ref->data.desc) {
1717 n = n->rb_left;
1718 } else if (desc > ref->data.desc) {
1719 n = n->rb_right;
1720 } else if (need_strong_ref && !ref->data.strong) {
1721 binder_user_error("tried to use weak ref as strong ref\n");
1722 return NULL;
1723 } else {
1724 return ref;
1725 }
1726 }
1727 return NULL;
1728 }
1729
1730 /**
1731 * binder_get_ref_for_node_olocked() - get the ref associated with given node
1732 * @proc: binder_proc that owns the ref
1733 * @node: binder_node of target
1734 * @new_ref: newly allocated binder_ref to be initialized or %NULL
1735 *
1736 * Look up the ref for the given node and return it if it exists
1737 *
1738 * If it doesn't exist and the caller provides a newly allocated
1739 * ref, initialize the fields of the newly allocated ref and insert
1740 * into the given proc rb_trees and node refs list.
1741 *
1742 * Return: the ref for node. It is possible that another thread
1743 * allocated/initialized the ref first in which case the
1744 * returned ref would be different than the passed-in
1745 * new_ref. new_ref must be kfree'd by the caller in
1746 * this case.
1747 */
1748 static struct binder_ref *binder_get_ref_for_node_olocked(
1749 struct binder_proc *proc,
1750 struct binder_node *node,
1751 struct binder_ref *new_ref)
1752 {
1753 struct binder_context *context = proc->context;
1754 struct rb_node **p = &proc->refs_by_node.rb_node;
1755 struct rb_node *parent = NULL;
1756 struct binder_ref *ref;
1757 struct rb_node *n;
1758
1759 while (*p) {
1760 parent = *p;
1761 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1762
1763 if (node < ref->node)
1764 p = &(*p)->rb_left;
1765 else if (node > ref->node)
1766 p = &(*p)->rb_right;
1767 else
1768 return ref;
1769 }
1770 if (!new_ref)
1771 return NULL;
1772
1773 binder_stats_created(BINDER_STAT_REF);
1774 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1775 new_ref->proc = proc;
1776 new_ref->node = node;
1777 rb_link_node(&new_ref->rb_node_node, parent, p);
1778 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1779
1780 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1781 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1782 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1783 if (ref->data.desc > new_ref->data.desc)
1784 break;
1785 new_ref->data.desc = ref->data.desc + 1;
1786 }
1787
1788 p = &proc->refs_by_desc.rb_node;
1789 while (*p) {
1790 parent = *p;
1791 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1792
1793 if (new_ref->data.desc < ref->data.desc)
1794 p = &(*p)->rb_left;
1795 else if (new_ref->data.desc > ref->data.desc)
1796 p = &(*p)->rb_right;
1797 else
1798 BUG();
1799 }
1800 rb_link_node(&new_ref->rb_node_desc, parent, p);
1801 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1802
1803 binder_node_lock(node);
1804 hlist_add_head(&new_ref->node_entry, &node->refs);
1805
1806 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1807 "%d(%s) new ref %d desc %d for node %d\n",
1808 proc->pid, proc->tsk->comm, new_ref->data.debug_id, new_ref->data.desc,
1809 node->debug_id);
1810 binder_node_unlock(node);
1811 return new_ref;
1812 }
1813
1814 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1815 {
1816 bool delete_node = false;
1817
1818 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1819 "%d(%s) delete ref %d desc %d for node %d\n",
1820 ref->proc->pid, ref->proc->tsk->comm, ref->data.debug_id, ref->data.desc,
1821 ref->node->debug_id);
1822
1823 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1824 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1825
1826 binder_node_inner_lock(ref->node);
1827 if (ref->data.strong)
1828 binder_dec_node_nilocked(ref->node, 1, 1);
1829
1830 hlist_del(&ref->node_entry);
1831 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1832 binder_node_inner_unlock(ref->node);
1833 /*
1834 * Clear ref->node unless we want the caller to free the node
1835 */
1836 if (!delete_node) {
1837 /*
1838 * The caller uses ref->node to determine
1839 * whether the node needs to be freed. Clear
1840 * it since the node is still alive.
1841 */
1842 ref->node = NULL;
1843 }
1844
1845 if (ref->death) {
1846 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1847 "%d(%s) delete ref %d desc %d has death notification\n",
1848 ref->proc->pid, ref->proc->tsk->comm, ref->data.debug_id,
1849 ref->data.desc);
1850 binder_dequeue_work(ref->proc, &ref->death->work);
1851 binder_stats_deleted(BINDER_STAT_DEATH);
1852 }
1853 binder_stats_deleted(BINDER_STAT_REF);
1854 }
1855
1856 /**
1857 * binder_inc_ref_olocked() - increment the ref for given handle
1858 * @ref: ref to be incremented
1859 * @strong: if true, strong increment, else weak
1860 * @target_list: list to queue node work on
1861 *
1862 * Increment the ref. @ref->proc->outer_lock must be held on entry
1863 *
1864 * Return: 0, if successful, else errno
1865 */
1866 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1867 struct list_head *target_list)
1868 {
1869 int ret;
1870
1871 if (strong) {
1872 if (ref->data.strong == 0) {
1873 ret = binder_inc_node(ref->node, 1, 1, target_list);
1874 if (ret)
1875 return ret;
1876 }
1877 ref->data.strong++;
1878 } else {
1879 if (ref->data.weak == 0) {
1880 ret = binder_inc_node(ref->node, 0, 1, target_list);
1881 if (ret)
1882 return ret;
1883 }
1884 ref->data.weak++;
1885 }
1886 return 0;
1887 }
1888
1889 /**
1890 * binder_dec_ref() - dec the ref for given handle
1891 * @ref: ref to be decremented
1892 * @strong: if true, strong decrement, else weak
1893 *
1894 * Decrement the ref.
1895 *
1896 * Return: true if ref is cleaned up and ready to be freed
1897 */
1898 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1899 {
1900 if (strong) {
1901 if (ref->data.strong == 0) {
1902 binder_user_error("%d(%s) invalid dec strong, ref %d desc %d s %d w %d\n",
1903 ref->proc->pid, ref->proc->tsk->comm, ref->data.debug_id,
1904 ref->data.desc, ref->data.strong,
1905 ref->data.weak);
1906 return false;
1907 }
1908 ref->data.strong--;
1909 if (ref->data.strong == 0)
1910 binder_dec_node(ref->node, strong, 1);
1911 } else {
1912 if (ref->data.weak == 0) {
1913 binder_user_error("%d(%s) invalid dec weak, ref %d desc %d s %d w %d\n",
1914 ref->proc->pid, ref->proc->tsk->comm, ref->data.debug_id,
1915 ref->data.desc, ref->data.strong,
1916 ref->data.weak);
1917 return false;
1918 }
1919 ref->data.weak--;
1920 }
1921 if (ref->data.strong == 0 && ref->data.weak == 0) {
1922 binder_cleanup_ref_olocked(ref);
1923 return true;
1924 }
1925 return false;
1926 }
1927
1928 /**
1929 * binder_get_node_from_ref() - get the node from the given proc/desc
1930 * @proc: proc containing the ref
1931 * @desc: the handle associated with the ref
1932 * @need_strong_ref: if true, only return node if ref is strong
1933 * @rdata: the id/refcount data for the ref
1934 *
1935 * Given a proc and ref handle, return the associated binder_node
1936 *
1937 * Return: a binder_node or NULL if not found or not strong when strong required
1938 */
1939 static struct binder_node *binder_get_node_from_ref(
1940 struct binder_proc *proc,
1941 u32 desc, bool need_strong_ref,
1942 struct binder_ref_data *rdata)
1943 {
1944 struct binder_node *node;
1945 struct binder_ref *ref;
1946
1947 binder_proc_lock(proc);
1948 ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1949 if (!ref)
1950 goto err_no_ref;
1951 node = ref->node;
1952 /*
1953 * Take an implicit reference on the node to ensure
1954 * it stays alive until the call to binder_put_node()
1955 */
1956 binder_inc_node_tmpref(node);
1957 if (rdata)
1958 *rdata = ref->data;
1959 binder_proc_unlock(proc);
1960
1961 return node;
1962
1963 err_no_ref:
1964 binder_proc_unlock(proc);
1965 return NULL;
1966 }
1967
1968 /**
1969 * binder_free_ref() - free the binder_ref
1970 * @ref: ref to free
1971 *
1972 * Free the binder_ref. Free the binder_node indicated by ref->node
1973 * (if non-NULL) and the binder_ref_death indicated by ref->death.
1974 */
1975 static void binder_free_ref(struct binder_ref *ref)
1976 {
1977 if (ref->node)
1978 binder_free_node(ref->node);
1979 kfree(ref->death);
1980 kfree(ref);
1981 }
1982
1983 /**
1984 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1985 * @proc: proc containing the ref
1986 * @desc: the handle associated with the ref
1987 * @increment: true=inc reference, false=dec reference
1988 * @strong: true=strong reference, false=weak reference
1989 * @rdata: the id/refcount data for the ref
1990 *
1991 * Given a proc and ref handle, increment or decrement the ref
1992 * according to "increment" arg.
1993 *
1994 * Return: 0 if successful, else errno
1995 */
1996 static int binder_update_ref_for_handle(struct binder_proc *proc,
1997 uint32_t desc, bool increment, bool strong,
1998 struct binder_ref_data *rdata)
1999 {
2000 int ret = 0;
2001 struct binder_ref *ref;
2002 bool delete_ref = false;
2003
2004 binder_proc_lock(proc);
2005 ref = binder_get_ref_olocked(proc, desc, strong);
2006 if (!ref) {
2007 ret = -EINVAL;
2008 goto err_no_ref;
2009 }
2010 if (increment)
2011 ret = binder_inc_ref_olocked(ref, strong, NULL);
2012 else
2013 delete_ref = binder_dec_ref_olocked(ref, strong);
2014
2015 if (rdata)
2016 *rdata = ref->data;
2017 binder_proc_unlock(proc);
2018
2019 if (delete_ref)
2020 binder_free_ref(ref);
2021 return ret;
2022
2023 err_no_ref:
2024 binder_proc_unlock(proc);
2025 return ret;
2026 }
2027
2028 /**
2029 * binder_dec_ref_for_handle() - dec the ref for given handle
2030 * @proc: proc containing the ref
2031 * @desc: the handle associated with the ref
2032 * @strong: true=strong reference, false=weak reference
2033 * @rdata: the id/refcount data for the ref
2034 *
2035 * Just calls binder_update_ref_for_handle() to decrement the ref.
2036 *
2037 * Return: 0 if successful, else errno
2038 */
2039 static int binder_dec_ref_for_handle(struct binder_proc *proc,
2040 uint32_t desc, bool strong, struct binder_ref_data *rdata)
2041 {
2042 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
2043 }
2044
2045
2046 /**
2047 * binder_inc_ref_for_node() - increment the ref for given proc/node
2048 * @proc: proc containing the ref
2049 * @node: target node
2050 * @strong: true=strong reference, false=weak reference
2051 * @target_list: worklist to use if node is incremented
2052 * @rdata: the id/refcount data for the ref
2053 *
2054 * Given a proc and node, increment the ref. Create the ref if it
2055 * doesn't already exist
2056 *
2057 * Return: 0 if successful, else errno
2058 */
2059 static int binder_inc_ref_for_node(struct binder_proc *proc,
2060 struct binder_node *node,
2061 bool strong,
2062 struct list_head *target_list,
2063 struct binder_ref_data *rdata)
2064 {
2065 struct binder_ref *ref;
2066 struct binder_ref *new_ref = NULL;
2067 int ret = 0;
2068
2069 binder_proc_lock(proc);
2070 ref = binder_get_ref_for_node_olocked(proc, node, NULL);
2071 if (!ref) {
2072 binder_proc_unlock(proc);
2073 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
2074 if (!new_ref)
2075 return -ENOMEM;
2076 binder_proc_lock(proc);
2077 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
2078 }
2079 ret = binder_inc_ref_olocked(ref, strong, target_list);
2080 *rdata = ref->data;
2081 binder_proc_unlock(proc);
2082 if (new_ref && ref != new_ref)
2083 /*
2084 * Another thread created the ref first so
2085 * free the one we allocated
2086 */
2087 kfree(new_ref);
2088 return ret;
2089 }
2090
2091 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
2092 struct binder_transaction *t)
2093 {
2094 BUG_ON(!target_thread);
2095 assert_spin_locked(&target_thread->proc->inner_lock);
2096 BUG_ON(target_thread->transaction_stack != t);
2097 BUG_ON(target_thread->transaction_stack->from != target_thread);
2098 target_thread->transaction_stack =
2099 target_thread->transaction_stack->from_parent;
2100 t->from = NULL;
2101 }
2102
2103 /**
2104 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
2105 * @thread: thread to decrement
2106 *
2107 * A thread needs to be kept alive while being used to create or
2108 * handle a transaction. binder_get_txn_from() is used to safely
2109 * extract t->from from a binder_transaction and keep the thread
2110 * indicated by t->from from being freed. When done with that
2111 * binder_thread, this function is called to decrement the
2112 * tmp_ref and free if appropriate (thread has been released
2113 * and no transaction being processed by the driver)
2114 */
2115 static void binder_thread_dec_tmpref(struct binder_thread *thread)
2116 {
2117 /*
2118 * atomic is used to protect the counter value while
2119 * it cannot reach zero or thread->is_dead is false
2120 */
2121 binder_inner_proc_lock(thread->proc);
2122 atomic_dec(&thread->tmp_ref);
2123 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
2124 binder_inner_proc_unlock(thread->proc);
2125 binder_free_thread(thread);
2126 return;
2127 }
2128 binder_inner_proc_unlock(thread->proc);
2129 }
2130
2131 /**
2132 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
2133 * @proc: proc to decrement
2134 *
2135 * A binder_proc needs to be kept alive while being used to create or
2136 * handle a transaction. proc->tmp_ref is incremented when
2137 * creating a new transaction or the binder_proc is currently in-use
2138 * by threads that are being released. When done with the binder_proc,
2139 * this function is called to decrement the counter and free the
2140 * proc if appropriate (proc has been released, all threads have
2141 * been released and not currenly in-use to process a transaction).
2142 */
2143 static void binder_proc_dec_tmpref(struct binder_proc *proc)
2144 {
2145 binder_inner_proc_lock(proc);
2146 proc->tmp_ref--;
2147 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
2148 !proc->tmp_ref) {
2149 binder_inner_proc_unlock(proc);
2150 binder_free_proc(proc);
2151 return;
2152 }
2153 binder_inner_proc_unlock(proc);
2154 }
2155
2156 /**
2157 * binder_get_txn_from() - safely extract the "from" thread in transaction
2158 * @t: binder transaction for t->from
2159 *
2160 * Atomically return the "from" thread and increment the tmp_ref
2161 * count for the thread to ensure it stays alive until
2162 * binder_thread_dec_tmpref() is called.
2163 *
2164 * Return: the value of t->from
2165 */
2166 static struct binder_thread *binder_get_txn_from(
2167 struct binder_transaction *t)
2168 {
2169 struct binder_thread *from;
2170
2171 spin_lock(&t->lock);
2172 from = t->from;
2173 if (from)
2174 atomic_inc(&from->tmp_ref);
2175 spin_unlock(&t->lock);
2176 return from;
2177 }
2178
2179 /**
2180 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
2181 * @t: binder transaction for t->from
2182 *
2183 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
2184 * to guarantee that the thread cannot be released while operating on it.
2185 * The caller must call binder_inner_proc_unlock() to release the inner lock
2186 * as well as call binder_dec_thread_txn() to release the reference.
2187 *
2188 * Return: the value of t->from
2189 */
2190 static struct binder_thread *binder_get_txn_from_and_acq_inner(
2191 struct binder_transaction *t)
2192 {
2193 struct binder_thread *from;
2194
2195 from = binder_get_txn_from(t);
2196 if (!from)
2197 return NULL;
2198 binder_inner_proc_lock(from->proc);
2199 if (t->from) {
2200 BUG_ON(from != t->from);
2201 return from;
2202 }
2203 binder_inner_proc_unlock(from->proc);
2204 binder_thread_dec_tmpref(from);
2205 return NULL;
2206 }
2207
2208 static void binder_free_transaction(struct binder_transaction *t)
2209 {
2210 if (t->buffer)
2211 t->buffer->transaction = NULL;
2212 kfree(t);
2213 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2214 }
2215
2216 static void binder_send_failed_reply(struct binder_transaction *t,
2217 uint32_t error_code)
2218 {
2219 struct binder_thread *target_thread;
2220 struct binder_transaction *next;
2221
2222 BUG_ON(t->flags & TF_ONE_WAY);
2223 while (1) {
2224 target_thread = binder_get_txn_from_and_acq_inner(t);
2225 if (target_thread) {
2226 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2227 "send failed reply for transaction %d to %d:%d(%s:%s)\n",
2228 t->debug_id,
2229 target_thread->proc->pid,
2230 target_thread->pid,
2231 target_thread->proc->tsk->comm,
2232 target_thread->task->comm);
2233
2234 binder_pop_transaction_ilocked(target_thread, t);
2235 if (target_thread->reply_error.cmd == BR_OK) {
2236 target_thread->reply_error.cmd = error_code;
2237 binder_enqueue_thread_work_ilocked(
2238 target_thread,
2239 &target_thread->reply_error.work);
2240 wake_up_interruptible(&target_thread->wait);
2241 } else {
2242 /*
2243 * Cannot get here for normal operation, but
2244 * we can if multiple synchronous transactions
2245 * are sent without blocking for responses.
2246 * Just ignore the 2nd error in this case.
2247 */
2248 pr_warn("Unexpected reply error: %u\n",
2249 target_thread->reply_error.cmd);
2250 }
2251 binder_inner_proc_unlock(target_thread->proc);
2252 binder_thread_dec_tmpref(target_thread);
2253 binder_free_transaction(t);
2254 return;
2255 }
2256 next = t->from_parent;
2257
2258 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2259 "send failed reply for transaction %d, target dead\n",
2260 t->debug_id);
2261
2262 binder_free_transaction(t);
2263 if (next == NULL) {
2264 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2265 "reply failed, no target thread at root\n");
2266 return;
2267 }
2268 t = next;
2269 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2270 "reply failed, no target thread -- retry %d\n",
2271 t->debug_id);
2272 }
2273 }
2274
2275 /**
2276 * binder_cleanup_transaction() - cleans up undelivered transaction
2277 * @t: transaction that needs to be cleaned up
2278 * @reason: reason the transaction wasn't delivered
2279 * @error_code: error to return to caller (if synchronous call)
2280 */
2281 static void binder_cleanup_transaction(struct binder_transaction *t,
2282 const char *reason,
2283 uint32_t error_code)
2284 {
2285 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2286 binder_send_failed_reply(t, error_code);
2287 } else {
2288 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2289 "undelivered transaction %d, %s\n",
2290 t->debug_id, reason);
2291 binder_free_transaction(t);
2292 }
2293 }
2294
2295 /**
2296 * binder_validate_object() - checks for a valid metadata object in a buffer.
2297 * @buffer: binder_buffer that we're parsing.
2298 * @offset: offset in the buffer at which to validate an object.
2299 *
2300 * Return: If there's a valid metadata object at @offset in @buffer, the
2301 * size of that object. Otherwise, it returns zero.
2302 */
2303 static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
2304 {
2305 /* Check if we can read a header first */
2306 struct binder_object_header *hdr;
2307 size_t object_size = 0;
2308
2309 if (offset > buffer->data_size - sizeof(*hdr) ||
2310 buffer->data_size < sizeof(*hdr) ||
2311 !IS_ALIGNED(offset, sizeof(u32)))
2312 return 0;
2313
2314 /* Ok, now see if we can read a complete object. */
2315 hdr = (struct binder_object_header *)(buffer->data + offset);
2316 switch (hdr->type) {
2317 case BINDER_TYPE_BINDER:
2318 case BINDER_TYPE_WEAK_BINDER:
2319 case BINDER_TYPE_HANDLE:
2320 case BINDER_TYPE_WEAK_HANDLE:
2321 object_size = sizeof(struct flat_binder_object);
2322 break;
2323 case BINDER_TYPE_FD:
2324 object_size = sizeof(struct binder_fd_object);
2325 break;
2326 case BINDER_TYPE_PTR:
2327 object_size = sizeof(struct binder_buffer_object);
2328 break;
2329 case BINDER_TYPE_FDA:
2330 object_size = sizeof(struct binder_fd_array_object);
2331 break;
2332 default:
2333 return 0;
2334 }
2335 if (offset <= buffer->data_size - object_size &&
2336 buffer->data_size >= object_size)
2337 return object_size;
2338 else
2339 return 0;
2340 }
2341
2342 /**
2343 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2344 * @b: binder_buffer containing the object
2345 * @index: index in offset array at which the binder_buffer_object is
2346 * located
2347 * @start: points to the start of the offset array
2348 * @num_valid: the number of valid offsets in the offset array
2349 *
2350 * Return: If @index is within the valid range of the offset array
2351 * described by @start and @num_valid, and if there's a valid
2352 * binder_buffer_object at the offset found in index @index
2353 * of the offset array, that object is returned. Otherwise,
2354 * %NULL is returned.
2355 * Note that the offset found in index @index itself is not
2356 * verified; this function assumes that @num_valid elements
2357 * from @start were previously verified to have valid offsets.
2358 */
2359 static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
2360 binder_size_t index,
2361 binder_size_t *start,
2362 binder_size_t num_valid)
2363 {
2364 struct binder_buffer_object *buffer_obj;
2365 binder_size_t *offp;
2366
2367 if (index >= num_valid)
2368 return NULL;
2369
2370 offp = start + index;
2371 buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
2372 if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
2373 return NULL;
2374
2375 return buffer_obj;
2376 }
2377
2378 /**
2379 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2380 * @b: transaction buffer
2381 * @objects_start start of objects buffer
2382 * @buffer: binder_buffer_object in which to fix up
2383 * @offset: start offset in @buffer to fix up
2384 * @last_obj: last binder_buffer_object that we fixed up in
2385 * @last_min_offset: minimum fixup offset in @last_obj
2386 *
2387 * Return: %true if a fixup in buffer @buffer at offset @offset is
2388 * allowed.
2389 *
2390 * For safety reasons, we only allow fixups inside a buffer to happen
2391 * at increasing offsets; additionally, we only allow fixup on the last
2392 * buffer object that was verified, or one of its parents.
2393 *
2394 * Example of what is allowed:
2395 *
2396 * A
2397 * B (parent = A, offset = 0)
2398 * C (parent = A, offset = 16)
2399 * D (parent = C, offset = 0)
2400 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2401 *
2402 * Examples of what is not allowed:
2403 *
2404 * Decreasing offsets within the same parent:
2405 * A
2406 * C (parent = A, offset = 16)
2407 * B (parent = A, offset = 0) // decreasing offset within A
2408 *
2409 * Referring to a parent that wasn't the last object or any of its parents:
2410 * A
2411 * B (parent = A, offset = 0)
2412 * C (parent = A, offset = 0)
2413 * C (parent = A, offset = 16)
2414 * D (parent = B, offset = 0) // B is not A or any of A's parents
2415 */
2416 static bool binder_validate_fixup(struct binder_buffer *b,
2417 binder_size_t *objects_start,
2418 struct binder_buffer_object *buffer,
2419 binder_size_t fixup_offset,
2420 struct binder_buffer_object *last_obj,
2421 binder_size_t last_min_offset)
2422 {
2423 if (!last_obj) {
2424 /* Nothing to fix up in */
2425 return false;
2426 }
2427
2428 while (last_obj != buffer) {
2429 /*
2430 * Safe to retrieve the parent of last_obj, since it
2431 * was already previously verified by the driver.
2432 */
2433 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2434 return false;
2435 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
2436 last_obj = (struct binder_buffer_object *)
2437 (b->data + *(objects_start + last_obj->parent));
2438 }
2439 return (fixup_offset >= last_min_offset);
2440 }
2441
2442 static void binder_transaction_buffer_release(struct binder_proc *proc,
2443 struct binder_buffer *buffer,
2444 binder_size_t *failed_at)
2445 {
2446 binder_size_t *offp, *off_start, *off_end;
2447 int debug_id = buffer->debug_id;
2448
2449 binder_debug(BINDER_DEBUG_TRANSACTION,
2450 "%d(%s) buffer release %d, size %zd-%zd, failed at %pK\n",
2451 proc->pid, proc->tsk->comm, buffer->debug_id,
2452 buffer->data_size, buffer->offsets_size, failed_at);
2453
2454 if (buffer->target_node)
2455 binder_dec_node(buffer->target_node, 1, 0);
2456
2457 off_start = (binder_size_t *)(buffer->data +
2458 ALIGN(buffer->data_size, sizeof(void *)));
2459 if (failed_at)
2460 off_end = failed_at;
2461 else
2462 off_end = (void *)off_start + buffer->offsets_size;
2463 for (offp = off_start; offp < off_end; offp++) {
2464 struct binder_object_header *hdr;
2465 size_t object_size = binder_validate_object(buffer, *offp);
2466
2467 if (object_size == 0) {
2468 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2469 debug_id, (u64)*offp, buffer->data_size);
2470 continue;
2471 }
2472 hdr = (struct binder_object_header *)(buffer->data + *offp);
2473 switch (hdr->type) {
2474 case BINDER_TYPE_BINDER:
2475 case BINDER_TYPE_WEAK_BINDER: {
2476 struct flat_binder_object *fp;
2477 struct binder_node *node;
2478
2479 fp = to_flat_binder_object(hdr);
2480 node = binder_get_node(proc, fp->binder);
2481 if (node == NULL) {
2482 pr_err("transaction release %d bad node %016llx\n",
2483 debug_id, (u64)fp->binder);
2484 break;
2485 }
2486 binder_debug(BINDER_DEBUG_TRANSACTION,
2487 " node %d u%016llx\n",
2488 node->debug_id, (u64)node->ptr);
2489 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2490 0);
2491 binder_put_node(node);
2492 } break;
2493 case BINDER_TYPE_HANDLE:
2494 case BINDER_TYPE_WEAK_HANDLE: {
2495 struct flat_binder_object *fp;
2496 struct binder_ref_data rdata;
2497 int ret;
2498
2499 fp = to_flat_binder_object(hdr);
2500 ret = binder_dec_ref_for_handle(proc, fp->handle,
2501 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2502
2503 if (ret) {
2504 pr_err("transaction release %d bad handle %d, ret = %d\n",
2505 debug_id, fp->handle, ret);
2506 break;
2507 }
2508 binder_debug(BINDER_DEBUG_TRANSACTION,
2509 " ref %d desc %d\n",
2510 rdata.debug_id, rdata.desc);
2511 } break;
2512
2513 case BINDER_TYPE_FD: {
2514 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2515
2516 binder_debug(BINDER_DEBUG_TRANSACTION,
2517 " fd %d\n", fp->fd);
2518 if (failed_at)
2519 task_close_fd(proc, fp->fd);
2520 } break;
2521 case BINDER_TYPE_PTR:
2522 /*
2523 * Nothing to do here, this will get cleaned up when the
2524 * transaction buffer gets freed
2525 */
2526 break;
2527 case BINDER_TYPE_FDA: {
2528 struct binder_fd_array_object *fda;
2529 struct binder_buffer_object *parent;
2530 uintptr_t parent_buffer;
2531 u32 *fd_array;
2532 size_t fd_index;
2533 binder_size_t fd_buf_size;
2534
2535 fda = to_binder_fd_array_object(hdr);
2536 parent = binder_validate_ptr(buffer, fda->parent,
2537 off_start,
2538 offp - off_start);
2539 if (!parent) {
2540 pr_err("transaction release %d bad parent offset",
2541 debug_id);
2542 continue;
2543 }
2544 /*
2545 * Since the parent was already fixed up, convert it
2546 * back to kernel address space to access it
2547 */
2548 parent_buffer = parent->buffer -
2549 binder_alloc_get_user_buffer_offset(
2550 &proc->alloc);
2551
2552 fd_buf_size = sizeof(u32) * fda->num_fds;
2553 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2554 pr_err("transaction release %d invalid number of fds (%lld)\n",
2555 debug_id, (u64)fda->num_fds);
2556 continue;
2557 }
2558 if (fd_buf_size > parent->length ||
2559 fda->parent_offset > parent->length - fd_buf_size) {
2560 /* No space for all file descriptors here. */
2561 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2562 debug_id, (u64)fda->num_fds);
2563 continue;
2564 }
2565 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2566 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2567 task_close_fd(proc, fd_array[fd_index]);
2568 } break;
2569 default:
2570 pr_err("transaction release %d bad object type %x\n",
2571 debug_id, hdr->type);
2572 break;
2573 }
2574 }
2575 }
2576
2577 static int binder_translate_binder(struct flat_binder_object *fp,
2578 struct binder_transaction *t,
2579 struct binder_thread *thread)
2580 {
2581 struct binder_node *node;
2582 struct binder_proc *proc = thread->proc;
2583 struct binder_proc *target_proc = t->to_proc;
2584 struct binder_ref_data rdata;
2585 int ret = 0;
2586
2587 node = binder_get_node(proc, fp->binder);
2588 if (!node) {
2589 node = binder_new_node(proc, fp);
2590 if (!node)
2591 return -ENOMEM;
2592 }
2593 if (fp->cookie != node->cookie) {
2594 binder_user_error("%d:%d(%s:%s) sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2595 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm, (u64)fp->binder,
2596 node->debug_id, (u64)fp->cookie,
2597 (u64)node->cookie);
2598 ret = -EINVAL;
2599 goto done;
2600 }
2601 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2602 ret = -EPERM;
2603 goto done;
2604 }
2605
2606 ret = binder_inc_ref_for_node(target_proc, node,
2607 fp->hdr.type == BINDER_TYPE_BINDER,
2608 &thread->todo, &rdata);
2609 if (ret)
2610 goto done;
2611
2612 if (fp->hdr.type == BINDER_TYPE_BINDER)
2613 fp->hdr.type = BINDER_TYPE_HANDLE;
2614 else
2615 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2616 fp->binder = 0;
2617 fp->handle = rdata.desc;
2618 fp->cookie = 0;
2619
2620 trace_binder_transaction_node_to_ref(t, node, &rdata);
2621 binder_debug(BINDER_DEBUG_TRANSACTION,
2622 " node %d u%016llx -> ref %d desc %d\n",
2623 node->debug_id, (u64)node->ptr,
2624 rdata.debug_id, rdata.desc);
2625 done:
2626 binder_put_node(node);
2627 return ret;
2628 }
2629
2630 static int binder_translate_handle(struct flat_binder_object *fp,
2631 struct binder_transaction *t,
2632 struct binder_thread *thread)
2633 {
2634 struct binder_proc *proc = thread->proc;
2635 struct binder_proc *target_proc = t->to_proc;
2636 struct binder_node *node;
2637 struct binder_ref_data src_rdata;
2638 int ret = 0;
2639
2640 node = binder_get_node_from_ref(proc, fp->handle,
2641 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2642 if (!node) {
2643 binder_user_error("%d:%d(%s:%s) got transaction with invalid handle, %d\n",
2644 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm, fp->handle);
2645 return -EINVAL;
2646 }
2647 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2648 ret = -EPERM;
2649 goto done;
2650 }
2651
2652 binder_node_lock(node);
2653 if (node->proc == target_proc) {
2654 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2655 fp->hdr.type = BINDER_TYPE_BINDER;
2656 else
2657 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2658 fp->binder = node->ptr;
2659 fp->cookie = node->cookie;
2660 if (node->proc)
2661 binder_inner_proc_lock(node->proc);
2662 binder_inc_node_nilocked(node,
2663 fp->hdr.type == BINDER_TYPE_BINDER,
2664 0, NULL);
2665 if (node->proc)
2666 binder_inner_proc_unlock(node->proc);
2667 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2668 binder_debug(BINDER_DEBUG_TRANSACTION,
2669 " ref %d desc %d -> node %d u%016llx\n",
2670 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2671 (u64)node->ptr);
2672 binder_node_unlock(node);
2673 } else {
2674 struct binder_ref_data dest_rdata;
2675
2676 binder_node_unlock(node);
2677 ret = binder_inc_ref_for_node(target_proc, node,
2678 fp->hdr.type == BINDER_TYPE_HANDLE,
2679 NULL, &dest_rdata);
2680 if (ret)
2681 goto done;
2682
2683 fp->binder = 0;
2684 fp->handle = dest_rdata.desc;
2685 fp->cookie = 0;
2686 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2687 &dest_rdata);
2688 binder_debug(BINDER_DEBUG_TRANSACTION,
2689 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2690 src_rdata.debug_id, src_rdata.desc,
2691 dest_rdata.debug_id, dest_rdata.desc,
2692 node->debug_id);
2693 }
2694 done:
2695 binder_put_node(node);
2696 return ret;
2697 }
2698
2699 static int binder_translate_fd(int fd,
2700 struct binder_transaction *t,
2701 struct binder_thread *thread,
2702 struct binder_transaction *in_reply_to)
2703 {
2704 struct binder_proc *proc = thread->proc;
2705 struct binder_proc *target_proc = t->to_proc;
2706 int target_fd;
2707 struct file *file;
2708 int ret;
2709 bool target_allows_fd;
2710
2711 if (in_reply_to)
2712 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2713 else
2714 target_allows_fd = t->buffer->target_node->accept_fds;
2715 if (!target_allows_fd) {
2716 binder_user_error("%d:%d(%s:%s) got %s with fd, %d, but target does not allow fds\n",
2717 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
2718 in_reply_to ? "reply" : "transaction",
2719 fd);
2720 ret = -EPERM;
2721 goto err_fd_not_accepted;
2722 }
2723
2724 file = fget(fd);
2725 if (!file) {
2726 binder_user_error("%d:%d(%s:%s) got transaction with invalid fd, %d\n",
2727 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm, fd);
2728 ret = -EBADF;
2729 goto err_fget;
2730 }
2731 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2732 if (ret < 0) {
2733 ret = -EPERM;
2734 goto err_security;
2735 }
2736
2737 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2738 if (target_fd < 0) {
2739 ret = -ENOMEM;
2740 goto err_get_unused_fd;
2741 }
2742 task_fd_install(target_proc, target_fd, file);
2743 trace_binder_transaction_fd(t, fd, target_fd);
2744 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
2745 fd, target_fd);
2746
2747 return target_fd;
2748
2749 err_get_unused_fd:
2750 err_security:
2751 fput(file);
2752 err_fget:
2753 err_fd_not_accepted:
2754 return ret;
2755 }
2756
2757 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2758 struct binder_buffer_object *parent,
2759 struct binder_transaction *t,
2760 struct binder_thread *thread,
2761 struct binder_transaction *in_reply_to)
2762 {
2763 binder_size_t fdi, fd_buf_size, num_installed_fds;
2764 int target_fd;
2765 uintptr_t parent_buffer;
2766 u32 *fd_array;
2767 struct binder_proc *proc = thread->proc;
2768 struct binder_proc *target_proc = t->to_proc;
2769
2770 fd_buf_size = sizeof(u32) * fda->num_fds;
2771 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2772 binder_user_error("%d:%d(%s:%s) got transaction with invalid number of fds (%lld)\n",
2773 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm, (u64)fda->num_fds);
2774 return -EINVAL;
2775 }
2776 if (fd_buf_size > parent->length ||
2777 fda->parent_offset > parent->length - fd_buf_size) {
2778 /* No space for all file descriptors here. */
2779 binder_user_error("%d:%d(%s:%s) not enough space to store %lld fds in buffer\n",
2780 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm, (u64)fda->num_fds);
2781 return -EINVAL;
2782 }
2783 /*
2784 * Since the parent was already fixed up, convert it
2785 * back to the kernel address space to access it
2786 */
2787 parent_buffer = parent->buffer -
2788 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
2789 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2790 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2791 binder_user_error("%d:%d(%s:%s) parent offset not aligned correctly.\n",
2792 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm);
2793 return -EINVAL;
2794 }
2795 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2796 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
2797 in_reply_to);
2798 if (target_fd < 0)
2799 goto err_translate_fd_failed;
2800 fd_array[fdi] = target_fd;
2801 }
2802 return 0;
2803
2804 err_translate_fd_failed:
2805 /*
2806 * Failed to allocate fd or security error, free fds
2807 * installed so far.
2808 */
2809 num_installed_fds = fdi;
2810 for (fdi = 0; fdi < num_installed_fds; fdi++)
2811 task_close_fd(target_proc, fd_array[fdi]);
2812 return target_fd;
2813 }
2814
2815 static int binder_fixup_parent(struct binder_transaction *t,
2816 struct binder_thread *thread,
2817 struct binder_buffer_object *bp,
2818 binder_size_t *off_start,
2819 binder_size_t num_valid,
2820 struct binder_buffer_object *last_fixup_obj,
2821 binder_size_t last_fixup_min_off)
2822 {
2823 struct binder_buffer_object *parent;
2824 u8 *parent_buffer;
2825 struct binder_buffer *b = t->buffer;
2826 struct binder_proc *proc = thread->proc;
2827 struct binder_proc *target_proc = t->to_proc;
2828
2829 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2830 return 0;
2831
2832 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2833 if (!parent) {
2834 binder_user_error("%d:%d(%s:%s) got transaction with invalid parent offset or type\n",
2835 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm);
2836 return -EINVAL;
2837 }
2838
2839 if (!binder_validate_fixup(b, off_start,
2840 parent, bp->parent_offset,
2841 last_fixup_obj,
2842 last_fixup_min_off)) {
2843 binder_user_error("%d:%d(%s:%s) got transaction with out-of-order buffer fixup\n",
2844 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm);
2845 return -EINVAL;
2846 }
2847
2848 if (parent->length < sizeof(binder_uintptr_t) ||
2849 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2850 /* No space for a pointer here! */
2851 binder_user_error("%d:%d(%s:%s) got transaction with invalid parent offset\n",
2852 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm);
2853 return -EINVAL;
2854 }
2855 parent_buffer = (u8 *)((uintptr_t)parent->buffer -
2856 binder_alloc_get_user_buffer_offset(
2857 &target_proc->alloc));
2858 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2859
2860 return 0;
2861 }
2862
2863 /**
2864 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2865 * @t: transaction to send
2866 * @proc: process to send the transaction to
2867 * @thread: thread in @proc to send the transaction to (may be NULL)
2868 *
2869 * This function queues a transaction to the specified process. It will try
2870 * to find a thread in the target process to handle the transaction and
2871 * wake it up. If no thread is found, the work is queued to the proc
2872 * waitqueue.
2873 *
2874 * If the @thread parameter is not NULL, the transaction is always queued
2875 * to the waitlist of that specific thread.
2876 *
2877 * Return: true if the transactions was successfully queued
2878 * false if the target process or thread is dead
2879 */
2880 static bool binder_proc_transaction(struct binder_transaction *t,
2881 struct binder_proc *proc,
2882 struct binder_thread *thread)
2883 {
2884 struct binder_node *node = t->buffer->target_node;
2885 struct binder_priority node_prio;
2886 bool oneway = !!(t->flags & TF_ONE_WAY);
2887 bool pending_async = false;
2888
2889 BUG_ON(!node);
2890 binder_node_lock(node);
2891 node_prio.prio = node->min_priority;
2892 node_prio.sched_policy = node->sched_policy;
2893
2894 if (oneway) {
2895 BUG_ON(thread);
2896 if (node->has_async_transaction) {
2897 pending_async = true;
2898 } else {
2899 node->has_async_transaction = 1;
2900 }
2901 }
2902
2903 binder_inner_proc_lock(proc);
2904
2905 if (proc->is_dead || (thread && thread->is_dead)) {
2906 binder_inner_proc_unlock(proc);
2907 binder_node_unlock(node);
2908 return false;
2909 }
2910
2911 if (!thread && !pending_async)
2912 thread = binder_select_thread_ilocked(proc);
2913
2914 if (thread) {
2915 binder_transaction_priority(thread->task, t, node_prio,
2916 node->inherit_rt);
2917 binder_enqueue_thread_work_ilocked(thread, &t->work);
2918 } else if (!pending_async) {
2919 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2920 } else {
2921 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2922 }
2923
2924 if (!pending_async)
2925 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2926
2927 binder_inner_proc_unlock(proc);
2928 binder_node_unlock(node);
2929
2930 return true;
2931 }
2932
2933 /**
2934 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2935 * @node: struct binder_node for which to get refs
2936 * @proc: returns @node->proc if valid
2937 * @error: if no @proc then returns BR_DEAD_REPLY
2938 *
2939 * User-space normally keeps the node alive when creating a transaction
2940 * since it has a reference to the target. The local strong ref keeps it
2941 * alive if the sending process dies before the target process processes
2942 * the transaction. If the source process is malicious or has a reference
2943 * counting bug, relying on the local strong ref can fail.
2944 *
2945 * Since user-space can cause the local strong ref to go away, we also take
2946 * a tmpref on the node to ensure it survives while we are constructing
2947 * the transaction. We also need a tmpref on the proc while we are
2948 * constructing the transaction, so we take that here as well.
2949 *
2950 * Return: The target_node with refs taken or NULL if no @node->proc is NULL.
2951 * Also sets @proc if valid. If the @node->proc is NULL indicating that the
2952 * target proc has died, @error is set to BR_DEAD_REPLY
2953 */
2954 static struct binder_node *binder_get_node_refs_for_txn(
2955 struct binder_node *node,
2956 struct binder_proc **procp,
2957 uint32_t *error)
2958 {
2959 struct binder_node *target_node = NULL;
2960
2961 binder_node_inner_lock(node);
2962 if (node->proc) {
2963 target_node = node;
2964 binder_inc_node_nilocked(node, 1, 0, NULL);
2965 binder_inc_node_tmpref_ilocked(node);
2966 node->proc->tmp_ref++;
2967 *procp = node->proc;
2968 } else
2969 *error = BR_DEAD_REPLY;
2970 binder_node_inner_unlock(node);
2971
2972 return target_node;
2973 }
2974
2975 static void binder_transaction(struct binder_proc *proc,
2976 struct binder_thread *thread,
2977 struct binder_transaction_data *tr, int reply,
2978 binder_size_t extra_buffers_size)
2979 {
2980 int ret;
2981 struct binder_transaction *t;
2982 struct binder_work *tcomplete;
2983 binder_size_t *offp, *off_end, *off_start;
2984 binder_size_t off_min;
2985 u8 *sg_bufp, *sg_buf_end;
2986 struct binder_proc *target_proc = NULL;
2987 struct binder_thread *target_thread = NULL;
2988 struct binder_node *target_node = NULL;
2989 struct binder_transaction *in_reply_to = NULL;
2990 struct binder_transaction_log_entry *e;
2991 uint32_t return_error = 0;
2992 uint32_t return_error_param = 0;
2993 uint32_t return_error_line = 0;
2994 struct binder_buffer_object *last_fixup_obj = NULL;
2995 binder_size_t last_fixup_min_off = 0;
2996 struct binder_context *context = proc->context;
2997 int t_debug_id = atomic_inc_return(&binder_last_id);
2998
2999 e = binder_transaction_log_add(&binder_transaction_log);
3000 e->debug_id = t_debug_id;
3001 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
3002 e->from_proc = proc->pid;
3003 e->from_thread = thread->pid;
3004 e->target_handle = tr->target.handle;
3005 e->data_size = tr->data_size;
3006 e->offsets_size = tr->offsets_size;
3007 e->context_name = proc->context->name;
3008
3009 if (reply) {
3010 binder_inner_proc_lock(proc);
3011 in_reply_to = thread->transaction_stack;
3012 if (in_reply_to == NULL) {
3013 binder_inner_proc_unlock(proc);
3014 binder_user_error("%d:%d(%s:%s) got reply transaction with no transaction stack\n",
3015 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm);
3016 return_error = BR_FAILED_REPLY;
3017 return_error_param = -EPROTO;
3018 return_error_line = __LINE__;
3019 goto err_empty_call_stack;
3020 }
3021 if (in_reply_to->to_thread != thread) {
3022 spin_lock(&in_reply_to->lock);
3023 binder_user_error("%d:%d(%s:%s) got reply transaction with bad transaction stack, "\
3024 "transaction %d has target %d:%d(%s:%s)\n",
3025 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm, in_reply_to->debug_id,
3026 in_reply_to->to_proc ?
3027 in_reply_to->to_proc->pid : 0,
3028 in_reply_to->to_thread ?
3029 in_reply_to->to_thread->pid : 0,
3030 in_reply_to->to_proc ?
3031 in_reply_to->to_proc->tsk->comm : "",
3032 in_reply_to->to_thread ?
3033 in_reply_to->to_thread->task->comm : "");
3034 spin_unlock(&in_reply_to->lock);
3035 binder_inner_proc_unlock(proc);
3036 return_error = BR_FAILED_REPLY;
3037 return_error_param = -EPROTO;
3038 return_error_line = __LINE__;
3039 in_reply_to = NULL;
3040 goto err_bad_call_stack;
3041 }
3042 thread->transaction_stack = in_reply_to->to_parent;
3043 binder_inner_proc_unlock(proc);
3044 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
3045 if (target_thread == NULL) {
3046 return_error = BR_DEAD_REPLY;
3047 return_error_line = __LINE__;
3048 goto err_dead_binder;
3049 }
3050 if (target_thread->transaction_stack != in_reply_to) {
3051 binder_user_error("%d:%d(%s:%s) got reply transaction with bad target transaction "\
3052 "stack %d, expected %d\n",
3053 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
3054 target_thread->transaction_stack ?
3055 target_thread->transaction_stack->debug_id : 0,
3056 in_reply_to->debug_id);
3057 binder_inner_proc_unlock(target_thread->proc);
3058 return_error = BR_FAILED_REPLY;
3059 return_error_param = -EPROTO;
3060 return_error_line = __LINE__;
3061 in_reply_to = NULL;
3062 target_thread = NULL;
3063 goto err_dead_binder;
3064 }
3065 target_proc = target_thread->proc;
3066 target_proc->tmp_ref++;
3067 binder_inner_proc_unlock(target_thread->proc);
3068 } else {
3069 if (tr->target.handle) {
3070 struct binder_ref *ref;
3071
3072 /*
3073 * There must already be a strong ref
3074 * on this node. If so, do a strong
3075 * increment on the node to ensure it
3076 * stays alive until the transaction is
3077 * done.
3078 */
3079 binder_proc_lock(proc);
3080 ref = binder_get_ref_olocked(proc, tr->target.handle,
3081 true);
3082 if (ref) {
3083 target_node = binder_get_node_refs_for_txn(
3084 ref->node, &target_proc,
3085 &return_error);
3086 } else {
3087 binder_user_error("%d:%d(%s:%s) got transaction to invalid handle\n",
3088 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm);
3089 return_error = BR_FAILED_REPLY;
3090 }
3091 binder_proc_unlock(proc);
3092 } else {
3093 mutex_lock(&context->context_mgr_node_lock);
3094 target_node = context->binder_context_mgr_node;
3095 if (target_node)
3096 target_node = binder_get_node_refs_for_txn(
3097 target_node, &target_proc,
3098 &return_error);
3099 else
3100 return_error = BR_DEAD_REPLY;
3101 mutex_unlock(&context->context_mgr_node_lock);
3102 if (target_node && target_proc == proc) {
3103 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
3104 proc->pid, thread->pid);
3105 return_error = BR_FAILED_REPLY;
3106 return_error_param = -EINVAL;
3107 return_error_line = __LINE__;
3108 goto err_invalid_target_handle;
3109 }
3110 }
3111 if (!target_node) {
3112 /*
3113 * return_error is set above
3114 */
3115 return_error_param = -EINVAL;
3116 return_error_line = __LINE__;
3117 goto err_dead_binder;
3118 }
3119 e->to_node = target_node->debug_id;
3120 if (security_binder_transaction(proc->tsk,
3121 target_proc->tsk) < 0) {
3122 return_error = BR_FAILED_REPLY;
3123 return_error_param = -EPERM;
3124 return_error_line = __LINE__;
3125 goto err_invalid_target_handle;
3126 }
3127 binder_inner_proc_lock(proc);
3128 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3129 struct binder_transaction *tmp;
3130
3131 tmp = thread->transaction_stack;
3132 if (tmp->to_thread != thread) {
3133 spin_lock(&tmp->lock);
3134 binder_user_error("%d:%d(%s:%s) got new transaction with bad transaction stack, "\
3135 "transaction %d has target %d:%d(%s:%s)\n",
3136 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm, tmp->debug_id,
3137 tmp->to_proc ? tmp->to_proc->pid : 0,
3138 tmp->to_thread ?
3139 tmp->to_thread->pid : 0,
3140 tmp->to_proc ? tmp->to_proc->tsk->comm : "",
3141 tmp->to_thread ? tmp->to_thread->task->comm : "");
3142 spin_unlock(&tmp->lock);
3143 binder_inner_proc_unlock(proc);
3144 return_error = BR_FAILED_REPLY;
3145 return_error_param = -EPROTO;
3146 return_error_line = __LINE__;
3147 goto err_bad_call_stack;
3148 }
3149 while (tmp) {
3150 struct binder_thread *from;
3151
3152 spin_lock(&tmp->lock);
3153 from = tmp->from;
3154 if (from && from->proc == target_proc) {
3155 atomic_inc(&from->tmp_ref);
3156 target_thread = from;
3157 spin_unlock(&tmp->lock);
3158 break;
3159 }
3160 spin_unlock(&tmp->lock);
3161 tmp = tmp->from_parent;
3162 }
3163 }
3164 binder_inner_proc_unlock(proc);
3165 }
3166 if (target_thread) {
3167 e->to_thread = target_thread->pid;
3168 }
3169 e->to_proc = target_proc->pid;
3170
3171 /* TODO: reuse incoming transaction for reply */
3172 t = kzalloc(sizeof(*t), GFP_KERNEL);
3173 if (t == NULL) {
3174 return_error = BR_FAILED_REPLY;
3175 return_error_param = -ENOMEM;
3176 return_error_line = __LINE__;
3177 goto err_alloc_t_failed;
3178 }
3179 binder_stats_created(BINDER_STAT_TRANSACTION);
3180 spin_lock_init(&t->lock);
3181
3182 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3183 if (tcomplete == NULL) {
3184 return_error = BR_FAILED_REPLY;
3185 return_error_param = -ENOMEM;
3186 return_error_line = __LINE__;
3187 goto err_alloc_tcomplete_failed;
3188 }
3189 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3190
3191 t->debug_id = t_debug_id;
3192
3193 if (reply)
3194 binder_debug(BINDER_DEBUG_TRANSACTION,
3195 "%d:%d(%s:%s) BC_REPLY %d -> %d:%d (%s:%s), data %016llx-%016llx size %lld-%lld-%lld\n",
3196 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm, t->debug_id,
3197 target_proc->pid, target_thread->pid, target_proc->tsk->comm, target_thread->task->comm,
3198 (u64)tr->data.ptr.buffer,
3199 (u64)tr->data.ptr.offsets,
3200 (u64)tr->data_size, (u64)tr->offsets_size,
3201 (u64)extra_buffers_size);
3202 else
3203 binder_debug(BINDER_DEBUG_TRANSACTION,
3204 "%d:%d(%s:%s) BC_TRANSACTION %d -> %d (%s) - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3205 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm, t->debug_id,
3206 target_proc->pid, target_proc->tsk->comm, target_node->debug_id,
3207 (u64)tr->data.ptr.buffer,
3208 (u64)tr->data.ptr.offsets,
3209 (u64)tr->data_size, (u64)tr->offsets_size,
3210 (u64)extra_buffers_size);
3211
3212 if (!reply && !(tr->flags & TF_ONE_WAY))
3213 t->from = thread;
3214 else
3215 t->from = NULL;
3216 t->sender_euid = task_euid(proc->tsk);
3217 t->to_proc = target_proc;
3218 t->to_thread = target_thread;
3219 t->code = tr->code;
3220 t->flags = tr->flags;
3221 if (!(t->flags & TF_ONE_WAY) &&
3222 binder_supported_policy(current->policy)) {
3223 /* Inherit supported policies for synchronous transactions */
3224 t->priority.sched_policy = current->policy;
3225 t->priority.prio = current->normal_prio;
3226 } else {
3227 /* Otherwise, fall back to the default priority */
3228 t->priority = target_proc->default_priority;
3229 }
3230
3231 #ifdef CONFIG_DEBUG_SNAPSHOT_BINDER
3232 dss_binder_transaction(reply, t, t->from ? t->from : thread, target_node ? target_node->debug_id : 0);
3233 #endif
3234 trace_binder_transaction(reply, t, target_node);
3235
3236 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3237 tr->offsets_size, extra_buffers_size,
3238 !reply && (t->flags & TF_ONE_WAY));
3239 if (IS_ERR(t->buffer)) {
3240 /*
3241 * -ESRCH indicates VMA cleared. The target is dying.
3242 */
3243 return_error_param = PTR_ERR(t->buffer);
3244 return_error = return_error_param == -ESRCH ?
3245 BR_DEAD_REPLY : BR_FAILED_REPLY;
3246 return_error_line = __LINE__;
3247 t->buffer = NULL;
3248 goto err_binder_alloc_buf_failed;
3249 }
3250 t->buffer->allow_user_free = 0;
3251 t->buffer->debug_id = t->debug_id;
3252 t->buffer->transaction = t;
3253 t->buffer->target_node = target_node;
3254 trace_binder_transaction_alloc_buf(t->buffer);
3255 off_start = (binder_size_t *)(t->buffer->data +
3256 ALIGN(tr->data_size, sizeof(void *)));
3257 offp = off_start;
3258
3259 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
3260 tr->data.ptr.buffer, tr->data_size)) {
3261 binder_user_error("%d:%d(%s:%s) got transaction with invalid data ptr\n",
3262 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm);
3263 return_error = BR_FAILED_REPLY;
3264 return_error_param = -EFAULT;
3265 return_error_line = __LINE__;
3266 goto err_copy_data_failed;
3267 }
3268 if (copy_from_user(offp, (const void __user *)(uintptr_t)
3269 tr->data.ptr.offsets, tr->offsets_size)) {
3270 binder_user_error("%d:%d(%s:%s) got transaction with invalid offsets ptr\n",
3271 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm);
3272 return_error = BR_FAILED_REPLY;
3273 return_error_param = -EFAULT;
3274 return_error_line = __LINE__;
3275 goto err_copy_data_failed;
3276 }
3277 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3278 binder_user_error("%d:%d(%s:%s) got transaction with invalid offsets size, %lld\n",
3279 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm, (u64)tr->offsets_size);
3280 return_error = BR_FAILED_REPLY;
3281 return_error_param = -EINVAL;
3282 return_error_line = __LINE__;
3283 goto err_bad_offset;
3284 }
3285 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3286 binder_user_error("%d:%d(%s:%s) got transaction with unaligned buffers size, %lld\n",
3287 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
3288 (u64)extra_buffers_size);
3289 return_error = BR_FAILED_REPLY;
3290 return_error_param = -EINVAL;
3291 return_error_line = __LINE__;
3292 goto err_bad_offset;
3293 }
3294 off_end = (void *)off_start + tr->offsets_size;
3295 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
3296 sg_buf_end = sg_bufp + extra_buffers_size;
3297 off_min = 0;
3298 for (; offp < off_end; offp++) {
3299 struct binder_object_header *hdr;
3300 size_t object_size = binder_validate_object(t->buffer, *offp);
3301
3302 if (object_size == 0 || *offp < off_min) {
3303 binder_user_error("%d:%d(%s:%s) got transaction with invalid offset (%lld, min %lld max %lld)"\
3304 " or object.\n",
3305 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
3306 (u64)*offp,
3307 (u64)off_min,
3308 (u64)t->buffer->data_size);
3309 return_error = BR_FAILED_REPLY;
3310 return_error_param = -EINVAL;
3311 return_error_line = __LINE__;
3312 goto err_bad_offset;
3313 }
3314
3315 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
3316 off_min = *offp + object_size;
3317 switch (hdr->type) {
3318 case BINDER_TYPE_BINDER:
3319 case BINDER_TYPE_WEAK_BINDER: {
3320 struct flat_binder_object *fp;
3321
3322 fp = to_flat_binder_object(hdr);
3323 ret = binder_translate_binder(fp, t, thread);
3324 if (ret < 0) {
3325 return_error = BR_FAILED_REPLY;
3326 return_error_param = ret;
3327 return_error_line = __LINE__;
3328 goto err_translate_failed;
3329 }
3330 } break;
3331 case BINDER_TYPE_HANDLE:
3332 case BINDER_TYPE_WEAK_HANDLE: {
3333 struct flat_binder_object *fp;
3334
3335 fp = to_flat_binder_object(hdr);
3336 ret = binder_translate_handle(fp, t, thread);
3337 if (ret < 0) {
3338 return_error = BR_FAILED_REPLY;
3339 return_error_param = ret;
3340 return_error_line = __LINE__;
3341 goto err_translate_failed;
3342 }
3343 } break;
3344
3345 case BINDER_TYPE_FD: {
3346 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3347 int target_fd = binder_translate_fd(fp->fd, t, thread,
3348 in_reply_to);
3349
3350 if (target_fd < 0) {
3351 return_error = BR_FAILED_REPLY;
3352 return_error_param = target_fd;
3353 return_error_line = __LINE__;
3354 goto err_translate_failed;
3355 }
3356 fp->pad_binder = 0;
3357 fp->fd = target_fd;
3358 } break;
3359 case BINDER_TYPE_FDA: {
3360 struct binder_fd_array_object *fda =
3361 to_binder_fd_array_object(hdr);
3362 struct binder_buffer_object *parent =
3363 binder_validate_ptr(t->buffer, fda->parent,
3364 off_start,
3365 offp - off_start);
3366 if (!parent) {
3367 binder_user_error("%d:%d(%s:%s) got transaction with invalid parent offset or type\n",
3368 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm);
3369 return_error = BR_FAILED_REPLY;
3370 return_error_param = -EINVAL;
3371 return_error_line = __LINE__;
3372 goto err_bad_parent;
3373 }
3374 if (!binder_validate_fixup(t->buffer, off_start,
3375 parent, fda->parent_offset,
3376 last_fixup_obj,
3377 last_fixup_min_off)) {
3378 binder_user_error("%d:%d(%s:%s) got transaction with out-of-order buffer fixup\n",
3379 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm);
3380 return_error = BR_FAILED_REPLY;
3381 return_error_param = -EINVAL;
3382 return_error_line = __LINE__;
3383 goto err_bad_parent;
3384 }
3385 ret = binder_translate_fd_array(fda, parent, t, thread,
3386 in_reply_to);
3387 if (ret < 0) {
3388 return_error = BR_FAILED_REPLY;
3389 return_error_param = ret;
3390 return_error_line = __LINE__;
3391 goto err_translate_failed;
3392 }
3393 last_fixup_obj = parent;
3394 last_fixup_min_off =
3395 fda->parent_offset + sizeof(u32) * fda->num_fds;
3396 } break;
3397 case BINDER_TYPE_PTR: {
3398 struct binder_buffer_object *bp =
3399 to_binder_buffer_object(hdr);
3400 size_t buf_left = sg_buf_end - sg_bufp;
3401
3402 if (bp->length > buf_left) {
3403 binder_user_error("%d:%d(%s:%s) got transaction with too large buffer\n",
3404 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm);
3405 return_error = BR_FAILED_REPLY;
3406 return_error_param = -EINVAL;
3407 return_error_line = __LINE__;
3408 goto err_bad_offset;
3409 }
3410 if (copy_from_user(sg_bufp,
3411 (const void __user *)(uintptr_t)
3412 bp->buffer, bp->length)) {
3413 binder_user_error("%d:%d(%s:%s) got transaction with invalid offsets ptr\n",
3414 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm);
3415 return_error_param = -EFAULT;
3416 return_error = BR_FAILED_REPLY;
3417 return_error_line = __LINE__;
3418 goto err_copy_data_failed;
3419 }
3420 /* Fixup buffer pointer to target proc address space */
3421 bp->buffer = (uintptr_t)sg_bufp +
3422 binder_alloc_get_user_buffer_offset(
3423 &target_proc->alloc);
3424 sg_bufp += ALIGN(bp->length, sizeof(u64));
3425
3426 ret = binder_fixup_parent(t, thread, bp, off_start,
3427 offp - off_start,
3428 last_fixup_obj,
3429 last_fixup_min_off);
3430 if (ret < 0) {
3431 return_error = BR_FAILED_REPLY;
3432 return_error_param = ret;
3433 return_error_line = __LINE__;
3434 goto err_translate_failed;
3435 }
3436 last_fixup_obj = bp;
3437 last_fixup_min_off = 0;
3438 } break;
3439 default:
3440 binder_user_error("%d:%d(%s:%s) got transaction with invalid object type, %x\n",
3441 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm, hdr->type);
3442 return_error = BR_FAILED_REPLY;
3443 return_error_param = -EINVAL;
3444 return_error_line = __LINE__;
3445 goto err_bad_object_type;
3446 }
3447 }
3448 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3449 t->work.type = BINDER_WORK_TRANSACTION;
3450
3451 if (reply) {
3452 binder_enqueue_thread_work(thread, tcomplete);
3453 binder_inner_proc_lock(target_proc);
3454 if (target_thread->is_dead) {
3455 binder_inner_proc_unlock(target_proc);
3456 goto err_dead_proc_or_thread;
3457 }
3458 BUG_ON(t->buffer->async_transaction != 0);
3459 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3460 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3461 binder_inner_proc_unlock(target_proc);
3462 wake_up_interruptible_sync(&target_thread->wait);
3463 binder_restore_priority(current, in_reply_to->saved_priority);
3464 binder_free_transaction(in_reply_to);
3465 } else if (!(t->flags & TF_ONE_WAY)) {
3466 BUG_ON(t->buffer->async_transaction != 0);
3467 binder_inner_proc_lock(proc);
3468 /*
3469 * Defer the TRANSACTION_COMPLETE, so we don't return to
3470 * userspace immediately; this allows the target process to
3471 * immediately start processing this transaction, reducing
3472 * latency. We will then return the TRANSACTION_COMPLETE when
3473 * the target replies (or there is an error).
3474 */
3475 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3476 t->need_reply = 1;
3477 t->from_parent = thread->transaction_stack;
3478 thread->transaction_stack = t;
3479 binder_inner_proc_unlock(proc);
3480 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3481 binder_inner_proc_lock(proc);
3482 binder_pop_transaction_ilocked(thread, t);
3483 binder_inner_proc_unlock(proc);
3484 goto err_dead_proc_or_thread;
3485 }
3486 } else {
3487 BUG_ON(target_node == NULL);
3488 BUG_ON(t->buffer->async_transaction != 1);
3489 binder_enqueue_thread_work(thread, tcomplete);
3490 if (!binder_proc_transaction(t, target_proc, NULL))
3491 goto err_dead_proc_or_thread;
3492 }
3493 if (target_thread)
3494 binder_thread_dec_tmpref(target_thread);
3495 binder_proc_dec_tmpref(target_proc);
3496 if (target_node)
3497 binder_dec_node_tmpref(target_node);
3498 /*
3499 * write barrier to synchronize with initialization
3500 * of log entry
3501 */
3502 smp_wmb();
3503 WRITE_ONCE(e->debug_id_done, t_debug_id);
3504 return;
3505
3506 err_dead_proc_or_thread:
3507 return_error = BR_DEAD_REPLY;
3508 return_error_line = __LINE__;
3509 binder_dequeue_work(proc, tcomplete);
3510 err_translate_failed:
3511 err_bad_object_type:
3512 err_bad_offset:
3513 err_bad_parent:
3514 err_copy_data_failed:
3515 trace_binder_transaction_failed_buffer_release(t->buffer);
3516 binder_transaction_buffer_release(target_proc, t->buffer, offp);
3517 if (target_node)
3518 binder_dec_node_tmpref(target_node);
3519 target_node = NULL;
3520 t->buffer->transaction = NULL;
3521 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3522 err_binder_alloc_buf_failed:
3523 kfree(tcomplete);
3524 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3525 err_alloc_tcomplete_failed:
3526 kfree(t);
3527 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3528 err_alloc_t_failed:
3529 err_bad_call_stack:
3530 err_empty_call_stack:
3531 err_dead_binder:
3532 err_invalid_target_handle:
3533 if (target_thread)
3534 binder_thread_dec_tmpref(target_thread);
3535 if (target_proc)
3536 binder_proc_dec_tmpref(target_proc);
3537 if (target_node) {
3538 binder_dec_node(target_node, 1, 0);
3539 binder_dec_node_tmpref(target_node);
3540 }
3541
3542 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3543 "%d:%d(%s:%s) transaction failed %d/%d, size %lld-%lld line %d\n",
3544 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
3545 return_error, return_error_param,
3546 (u64)tr->data_size, (u64)tr->offsets_size,
3547 return_error_line);
3548
3549 {
3550 struct binder_transaction_log_entry *fe;
3551
3552 e->return_error = return_error;
3553 e->return_error_param = return_error_param;
3554 e->return_error_line = return_error_line;
3555 #ifdef CONFIG_DEBUG_SNAPSHOT_BINDER
3556 dss_binder_transaction_failed(reply, e, proc->tsk->comm, thread->task->comm, tr->flags, tr->code);
3557 #endif
3558 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3559 *fe = *e;
3560 /*
3561 * write barrier to synchronize with initialization
3562 * of log entry
3563 */
3564 smp_wmb();
3565 WRITE_ONCE(e->debug_id_done, t_debug_id);
3566 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3567 }
3568
3569 BUG_ON(thread->return_error.cmd != BR_OK);
3570 if (in_reply_to) {
3571 binder_restore_priority(current, in_reply_to->saved_priority);
3572 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3573 binder_enqueue_thread_work(thread, &thread->return_error.work);
3574 binder_send_failed_reply(in_reply_to, return_error);
3575 } else {
3576 thread->return_error.cmd = return_error;
3577 binder_enqueue_thread_work(thread, &thread->return_error.work);
3578 }
3579 }
3580
3581 static int binder_thread_write(struct binder_proc *proc,
3582 struct binder_thread *thread,
3583 binder_uintptr_t binder_buffer, size_t size,
3584 binder_size_t *consumed)
3585 {
3586 uint32_t cmd;
3587 struct binder_context *context = proc->context;
3588 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3589 void __user *ptr = buffer + *consumed;
3590 void __user *end = buffer + size;
3591
3592 while (ptr < end && thread->return_error.cmd == BR_OK) {
3593 int ret;
3594
3595 if (get_user(cmd, (uint32_t __user *)ptr))
3596 return -EFAULT;
3597 ptr += sizeof(uint32_t);
3598 trace_binder_command(cmd);
3599 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3600 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3601 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3602 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3603 }
3604 switch (cmd) {
3605 case BC_INCREFS:
3606 case BC_ACQUIRE:
3607 case BC_RELEASE:
3608 case BC_DECREFS: {
3609 uint32_t target;
3610 const char *debug_string;
3611 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3612 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3613 struct binder_ref_data rdata;
3614
3615 if (get_user(target, (uint32_t __user *)ptr))
3616 return -EFAULT;
3617
3618 ptr += sizeof(uint32_t);
3619 ret = -1;
3620 if (increment && !target) {
3621 struct binder_node *ctx_mgr_node;
3622 mutex_lock(&context->context_mgr_node_lock);
3623 ctx_mgr_node = context->binder_context_mgr_node;
3624 if (ctx_mgr_node)
3625 ret = binder_inc_ref_for_node(
3626 proc, ctx_mgr_node,
3627 strong, NULL, &rdata);
3628 mutex_unlock(&context->context_mgr_node_lock);
3629 }
3630 if (ret)
3631 ret = binder_update_ref_for_handle(
3632 proc, target, increment, strong,
3633 &rdata);
3634 if (!ret && rdata.desc != target) {
3635 binder_user_error("%d:%d(%s:%s) tried to acquire reference to"\
3636 " desc %d, got %d instead\n",
3637 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
3638 target, rdata.desc);
3639 }
3640 switch (cmd) {
3641 case BC_INCREFS:
3642 debug_string = "IncRefs";
3643 break;
3644 case BC_ACQUIRE:
3645 debug_string = "Acquire";
3646 break;
3647 case BC_RELEASE:
3648 debug_string = "Release";
3649 break;
3650 case BC_DECREFS:
3651 default:
3652 debug_string = "DecRefs";
3653 break;
3654 }
3655 if (ret) {
3656 binder_user_error("%d:%d(%s:%s) %s %d refcount change on invalid ref %d ret %d\n",
3657 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
3658 debug_string, strong, target, ret);
3659 break;
3660 }
3661 binder_debug(BINDER_DEBUG_USER_REFS,
3662 "%d:%d(%s:%s) %s ref %d desc %d s %d w %d\n",
3663 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
3664 debug_string, rdata.debug_id, rdata.desc, rdata.strong,
3665 rdata.weak);
3666 break;
3667 }
3668 case BC_INCREFS_DONE:
3669 case BC_ACQUIRE_DONE: {
3670 binder_uintptr_t node_ptr;
3671 binder_uintptr_t cookie;
3672 struct binder_node *node;
3673 bool free_node;
3674
3675 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3676 return -EFAULT;
3677 ptr += sizeof(binder_uintptr_t);
3678 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3679 return -EFAULT;
3680 ptr += sizeof(binder_uintptr_t);
3681 node = binder_get_node(proc, node_ptr);
3682 if (node == NULL) {
3683 binder_user_error("%d:%d(%s:%s) %s u%016llx no match\n",
3684 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
3685 cmd == BC_INCREFS_DONE ?
3686 "BC_INCREFS_DONE" :
3687 "BC_ACQUIRE_DONE",
3688 (u64)node_ptr);
3689 break;
3690 }
3691 if (cookie != node->cookie) {
3692 binder_user_error("%d:%d(%s:%s) %s u%016llx node %d cookie mismatch"\
3693 " %016llx != %016llx\n",
3694 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
3695 cmd == BC_INCREFS_DONE ?
3696 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3697 (u64)node_ptr, node->debug_id,
3698 (u64)cookie, (u64)node->cookie);
3699 binder_put_node(node);
3700 break;
3701 }
3702 binder_node_inner_lock(node);
3703 if (cmd == BC_ACQUIRE_DONE) {
3704 if (node->pending_strong_ref == 0) {
3705 binder_user_error("%d:%d(%s:%s) BC_ACQUIRE_DONE node %d"\
3706 " has no pending acquire request\n",
3707 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
3708 node->debug_id);
3709 binder_node_inner_unlock(node);
3710 binder_put_node(node);
3711 break;
3712 }
3713 node->pending_strong_ref = 0;
3714 } else {
3715 if (node->pending_weak_ref == 0) {
3716 binder_user_error("%d:%d(%s:%s) BC_INCREFS_DONE node %d"\
3717 " has no pending increfs request\n",
3718 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
3719 node->debug_id);
3720 binder_node_inner_unlock(node);
3721 binder_put_node(node);
3722 break;
3723 }
3724 node->pending_weak_ref = 0;
3725 }
3726 free_node = binder_dec_node_nilocked(node,
3727 cmd == BC_ACQUIRE_DONE, 0);
3728 WARN_ON(free_node);
3729 binder_debug(BINDER_DEBUG_USER_REFS,
3730 "%d:%d(%s:%s) %s node %d ls %d lw %d tr %d\n",
3731 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
3732 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3733 node->debug_id, node->local_strong_refs,
3734 node->local_weak_refs, node->tmp_refs);
3735 binder_node_inner_unlock(node);
3736 binder_put_node(node);
3737 break;
3738 }
3739 case BC_ATTEMPT_ACQUIRE:
3740 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3741 return -EINVAL;
3742 case BC_ACQUIRE_RESULT:
3743 pr_err("BC_ACQUIRE_RESULT not supported\n");
3744 return -EINVAL;
3745
3746 case BC_FREE_BUFFER: {
3747 binder_uintptr_t data_ptr;
3748 struct binder_buffer *buffer;
3749
3750 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3751 return -EFAULT;
3752 ptr += sizeof(binder_uintptr_t);
3753
3754 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3755 data_ptr);
3756 if (buffer == NULL) {
3757 binder_user_error("%d:%d(%s:%s) BC_FREE_BUFFER u%016llx no match\n",
3758 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm, (u64)data_ptr);
3759 break;
3760 }
3761 if (!buffer->allow_user_free) {
3762 binder_user_error("%d:%d(%s:%s) BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
3763 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm, (u64)data_ptr);
3764 break;
3765 }
3766 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3767 "%d:%d(%s:%s) BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3768 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm, (u64)data_ptr,
3769 buffer->debug_id,
3770 buffer->transaction ? "active" : "finished");
3771
3772 if (buffer->transaction) {
3773 buffer->transaction->buffer = NULL;
3774 buffer->transaction = NULL;
3775 }
3776 if (buffer->async_transaction && buffer->target_node) {
3777 struct binder_node *buf_node;
3778 struct binder_work *w;
3779
3780 buf_node = buffer->target_node;
3781 binder_node_inner_lock(buf_node);
3782 BUG_ON(!buf_node->has_async_transaction);
3783 BUG_ON(buf_node->proc != proc);
3784 w = binder_dequeue_work_head_ilocked(
3785 &buf_node->async_todo);
3786 if (!w) {
3787 buf_node->has_async_transaction = 0;
3788 } else {
3789 binder_enqueue_work_ilocked(
3790 w, &proc->todo);
3791 binder_wakeup_proc_ilocked(proc);
3792 }
3793 binder_node_inner_unlock(buf_node);
3794 }
3795 trace_binder_transaction_buffer_release(buffer);
3796 binder_transaction_buffer_release(proc, buffer, NULL);
3797 binder_alloc_free_buf(&proc->alloc, buffer);
3798 break;
3799 }
3800
3801 case BC_TRANSACTION_SG:
3802 case BC_REPLY_SG: {
3803 struct binder_transaction_data_sg tr;
3804
3805 if (copy_from_user(&tr, ptr, sizeof(tr)))
3806 return -EFAULT;
3807 ptr += sizeof(tr);
3808 binder_transaction(proc, thread, &tr.transaction_data,
3809 cmd == BC_REPLY_SG, tr.buffers_size);
3810 break;
3811 }
3812 case BC_TRANSACTION:
3813 case BC_REPLY: {
3814 struct binder_transaction_data tr;
3815
3816 if (copy_from_user(&tr, ptr, sizeof(tr)))
3817 return -EFAULT;
3818 ptr += sizeof(tr);
3819 binder_transaction(proc, thread, &tr,
3820 cmd == BC_REPLY, 0);
3821 break;
3822 }
3823
3824 case BC_REGISTER_LOOPER:
3825 binder_debug(BINDER_DEBUG_THREADS,
3826 "%d:%d(%s:%s) BC_REGISTER_LOOPER\n",
3827 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm);
3828 binder_inner_proc_lock(proc);
3829 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3830 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3831 binder_user_error("%d:%d(%s:%s) ERROR: BC_REGISTER_LOOPER"\
3832 " called after BC_ENTER_LOOPER\n",
3833 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm);
3834 } else if (proc->requested_threads == 0) {
3835 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3836 binder_user_error("%d:%d(%s:%s) ERROR: BC_REGISTER_LOOPER called without request\n",
3837 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm);
3838 } else {
3839 proc->requested_threads--;
3840 proc->requested_threads_started++;
3841 }
3842 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3843 binder_inner_proc_unlock(proc);
3844 break;
3845 case BC_ENTER_LOOPER:
3846 binder_debug(BINDER_DEBUG_THREADS,
3847 "%d:%d(%s:%s) BC_ENTER_LOOPER\n",
3848 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm);
3849 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3850 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3851 binder_user_error("%d:%d(%s:%s) ERROR: BC_ENTER_LOOPER"\
3852 " called after BC_REGISTER_LOOPER\n",
3853 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm);
3854 }
3855 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3856 break;
3857 case BC_EXIT_LOOPER:
3858 binder_debug(BINDER_DEBUG_THREADS,
3859 "%d:%d(%s:%s) BC_EXIT_LOOPER\n",
3860 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm);
3861 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3862 break;
3863
3864 case BC_REQUEST_DEATH_NOTIFICATION:
3865 case BC_CLEAR_DEATH_NOTIFICATION: {
3866 uint32_t target;
3867 binder_uintptr_t cookie;
3868 struct binder_ref *ref;
3869 struct binder_ref_death *death = NULL;
3870
3871 if (get_user(target, (uint32_t __user *)ptr))
3872 return -EFAULT;
3873 ptr += sizeof(uint32_t);
3874 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3875 return -EFAULT;
3876 ptr += sizeof(binder_uintptr_t);
3877 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3878 /*
3879 * Allocate memory for death notification
3880 * before taking lock
3881 */
3882 death = kzalloc(sizeof(*death), GFP_KERNEL);
3883 if (death == NULL) {
3884 WARN_ON(thread->return_error.cmd !=
3885 BR_OK);
3886 thread->return_error.cmd = BR_ERROR;
3887 binder_enqueue_thread_work(
3888 thread,
3889 &thread->return_error.work);
3890 binder_debug(
3891 BINDER_DEBUG_FAILED_TRANSACTION,
3892 "%d:%d(%s:%s) BC_REQUEST_DEATH_NOTIFICATION failed\n",
3893 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm);
3894 break;
3895 }
3896 }
3897 binder_proc_lock(proc);
3898 ref = binder_get_ref_olocked(proc, target, false);
3899 if (ref == NULL) {
3900 binder_user_error("%d:%d(%s:%s) %s invalid ref %d\n",
3901 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
3902 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3903 "BC_REQUEST_DEATH_NOTIFICATION" :
3904 "BC_CLEAR_DEATH_NOTIFICATION",
3905 target);
3906 binder_proc_unlock(proc);
3907 kfree(death);
3908 break;
3909 }
3910
3911 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3912 "%d:%d(%s:%s) %s %016llx ref %d desc %d s %d w %d for node %d\n",
3913 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
3914 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3915 "BC_REQUEST_DEATH_NOTIFICATION" :
3916 "BC_CLEAR_DEATH_NOTIFICATION",
3917 (u64)cookie, ref->data.debug_id,
3918 ref->data.desc, ref->data.strong,
3919 ref->data.weak, ref->node->debug_id);
3920
3921 binder_node_lock(ref->node);
3922 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3923 if (ref->death) {
3924 binder_user_error("%d:%d(%s:%s) BC_REQUEST_DEATH_NOTIFICATION"\
3925 " death notification already set\n",
3926 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm);
3927 binder_node_unlock(ref->node);
3928 binder_proc_unlock(proc);
3929 kfree(death);
3930 break;
3931 }
3932 binder_stats_created(BINDER_STAT_DEATH);
3933 INIT_LIST_HEAD(&death->work.entry);
3934 death->cookie = cookie;
3935 ref->death = death;
3936 if (ref->node->proc == NULL) {
3937 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3938
3939 binder_inner_proc_lock(proc);
3940 binder_enqueue_work_ilocked(
3941 &ref->death->work, &proc->todo);
3942 binder_wakeup_proc_ilocked(proc);
3943 binder_inner_proc_unlock(proc);
3944 }
3945 } else {
3946 if (ref->death == NULL) {
3947 binder_user_error("%d:%d(%s:%s) BC_CLEAR_DEATH_NOTIFICATION"\
3948 " death notification not active\n",
3949 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm);
3950 binder_node_unlock(ref->node);
3951 binder_proc_unlock(proc);
3952 break;
3953 }
3954 death = ref->death;
3955 if (death->cookie != cookie) {
3956 binder_user_error("%d:%d(%s:%s) BC_CLEAR_DEATH_NOTIFICATION"\
3957 " death notification cookie mismatch %016llx != %016llx\n",
3958 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
3959 (u64)death->cookie,
3960 (u64)cookie);
3961 binder_node_unlock(ref->node);
3962 binder_proc_unlock(proc);
3963 break;
3964 }
3965 ref->death = NULL;
3966 binder_inner_proc_lock(proc);
3967 if (list_empty(&death->work.entry)) {
3968 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3969 if (thread->looper &
3970 (BINDER_LOOPER_STATE_REGISTERED |
3971 BINDER_LOOPER_STATE_ENTERED))
3972 binder_enqueue_thread_work_ilocked(
3973 thread,
3974 &death->work);
3975 else {
3976 binder_enqueue_work_ilocked(
3977 &death->work,
3978 &proc->todo);
3979 binder_wakeup_proc_ilocked(
3980 proc);
3981 }
3982 } else {
3983 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3984 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3985 }
3986 binder_inner_proc_unlock(proc);
3987 }
3988 binder_node_unlock(ref->node);
3989 binder_proc_unlock(proc);
3990 } break;
3991 case BC_DEAD_BINDER_DONE: {
3992 struct binder_work *w;
3993 binder_uintptr_t cookie;
3994 struct binder_ref_death *death = NULL;
3995
3996 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3997 return -EFAULT;
3998
3999 ptr += sizeof(cookie);
4000 binder_inner_proc_lock(proc);
4001 list_for_each_entry(w, &proc->delivered_death,
4002 entry) {
4003 struct binder_ref_death *tmp_death =
4004 container_of(w,
4005 struct binder_ref_death,
4006 work);
4007
4008 if (tmp_death->cookie == cookie) {
4009 death = tmp_death;
4010 break;
4011 }
4012 }
4013 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4014 "%d:%d(%s:%s) BC_DEAD_BINDER_DONE %016llx found %pK\n",
4015 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm, (u64)cookie,
4016 death);
4017 if (death == NULL) {
4018 binder_user_error("%d:%d(%s:%s) BC_DEAD_BINDER_DONE %016llx not found\n",
4019 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm, (u64)cookie);
4020 binder_inner_proc_unlock(proc);
4021 break;
4022 }
4023 binder_dequeue_work_ilocked(&death->work);
4024 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4025 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4026 if (thread->looper &
4027 (BINDER_LOOPER_STATE_REGISTERED |
4028 BINDER_LOOPER_STATE_ENTERED))
4029 binder_enqueue_thread_work_ilocked(
4030 thread, &death->work);
4031 else {
4032 binder_enqueue_work_ilocked(
4033 &death->work,
4034 &proc->todo);
4035 binder_wakeup_proc_ilocked(proc);
4036 }
4037 }
4038 binder_inner_proc_unlock(proc);
4039 } break;
4040
4041 default:
4042 pr_err("%d:%d(%s:%s) unknown command %d\n",
4043 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm, cmd);
4044 return -EINVAL;
4045 }
4046 *consumed = ptr - buffer;
4047 }
4048 return 0;
4049 }
4050
4051 static void binder_stat_br(struct binder_proc *proc,
4052 struct binder_thread *thread, uint32_t cmd)
4053 {
4054 trace_binder_return(cmd);
4055 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4056 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4057 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4058 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4059 }
4060 }
4061
4062 static int binder_put_node_cmd(struct binder_proc *proc,
4063 struct binder_thread *thread,
4064 void __user **ptrp,
4065 binder_uintptr_t node_ptr,
4066 binder_uintptr_t node_cookie,
4067 int node_debug_id,
4068 uint32_t cmd, const char *cmd_name)
4069 {
4070 void __user *ptr = *ptrp;
4071
4072 if (put_user(cmd, (uint32_t __user *)ptr))
4073 return -EFAULT;
4074 ptr += sizeof(uint32_t);
4075
4076 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4077 return -EFAULT;
4078 ptr += sizeof(binder_uintptr_t);
4079
4080 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4081 return -EFAULT;
4082 ptr += sizeof(binder_uintptr_t);
4083
4084 binder_stat_br(proc, thread, cmd);
4085 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d(%s:%s) %s %d u%016llx c%016llx\n",
4086 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
4087 cmd_name, node_debug_id,
4088 (u64)node_ptr, (u64)node_cookie);
4089
4090 *ptrp = ptr;
4091 return 0;
4092 }
4093
4094 static int binder_wait_for_work(struct binder_thread *thread,
4095 bool do_proc_work)
4096 {
4097 DEFINE_WAIT(wait);
4098 struct binder_proc *proc = thread->proc;
4099 int ret = 0;
4100
4101 freezer_do_not_count();
4102 binder_inner_proc_lock(proc);
4103 for (;;) {
4104 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4105 if (binder_has_work_ilocked(thread, do_proc_work))
4106 break;
4107 if (do_proc_work)
4108 list_add(&thread->waiting_thread_node,
4109 &proc->waiting_threads);
4110 binder_inner_proc_unlock(proc);
4111 schedule();
4112 binder_inner_proc_lock(proc);
4113 list_del_init(&thread->waiting_thread_node);
4114 if (signal_pending(current)) {
4115 ret = -ERESTARTSYS;
4116 break;
4117 }
4118 }
4119 finish_wait(&thread->wait, &wait);
4120 binder_inner_proc_unlock(proc);
4121 freezer_count();
4122
4123 return ret;
4124 }
4125
4126 static int binder_thread_read(struct binder_proc *proc,
4127 struct binder_thread *thread,
4128 binder_uintptr_t binder_buffer, size_t size,
4129 binder_size_t *consumed, int non_block)
4130 {
4131 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4132 void __user *ptr = buffer + *consumed;
4133 void __user *end = buffer + size;
4134
4135 int ret = 0;
4136 int wait_for_proc_work;
4137
4138 if (*consumed == 0) {
4139 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4140 return -EFAULT;
4141 ptr += sizeof(uint32_t);
4142 }
4143
4144 retry:
4145 binder_inner_proc_lock(proc);
4146 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4147 binder_inner_proc_unlock(proc);
4148
4149 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4150
4151 trace_binder_wait_for_work(wait_for_proc_work,
4152 !!thread->transaction_stack,
4153 !binder_worklist_empty(proc, &thread->todo));
4154 if (wait_for_proc_work) {
4155 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4156 BINDER_LOOPER_STATE_ENTERED))) {
4157 binder_user_error("%d:%d(%s:%s) ERROR: Thread waiting for process work"\
4158 " before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4159 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm, thread->looper);
4160 wait_event_interruptible(binder_user_error_wait,
4161 binder_stop_on_user_error < 2);
4162 }
4163 binder_restore_priority(current, proc->default_priority);
4164 }
4165
4166 if (non_block) {
4167 if (!binder_has_work(thread, wait_for_proc_work))
4168 ret = -EAGAIN;
4169 } else {
4170 ret = binder_wait_for_work(thread, wait_for_proc_work);
4171 }
4172
4173 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4174
4175 if (ret)
4176 return ret;
4177
4178 while (1) {
4179 uint32_t cmd;
4180 struct binder_transaction_data tr;
4181 struct binder_work *w = NULL;
4182 struct list_head *list = NULL;
4183 struct binder_transaction *t = NULL;
4184 struct binder_thread *t_from;
4185
4186 binder_inner_proc_lock(proc);
4187 if (!binder_worklist_empty_ilocked(&thread->todo))
4188 list = &thread->todo;
4189 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4190 wait_for_proc_work)
4191 list = &proc->todo;
4192 else {
4193 binder_inner_proc_unlock(proc);
4194
4195 /* no data added */
4196 if (ptr - buffer == 4 && !thread->looper_need_return)
4197 goto retry;
4198 break;
4199 }
4200
4201 if (end - ptr < sizeof(tr) + 4) {
4202 binder_inner_proc_unlock(proc);
4203 break;
4204 }
4205 w = binder_dequeue_work_head_ilocked(list);
4206 if (binder_worklist_empty_ilocked(&thread->todo))
4207 thread->process_todo = false;
4208
4209 switch (w->type) {
4210 case BINDER_WORK_TRANSACTION: {
4211 binder_inner_proc_unlock(proc);
4212 t = container_of(w, struct binder_transaction, work);
4213 } break;
4214 case BINDER_WORK_RETURN_ERROR: {
4215 struct binder_error *e = container_of(
4216 w, struct binder_error, work);
4217
4218 WARN_ON(e->cmd == BR_OK);
4219 binder_inner_proc_unlock(proc);
4220 if (put_user(e->cmd, (uint32_t __user *)ptr))
4221 return -EFAULT;
4222 e->cmd = BR_OK;
4223 ptr += sizeof(uint32_t);
4224
4225 binder_stat_br(proc, thread, e->cmd);
4226 } break;
4227 case BINDER_WORK_TRANSACTION_COMPLETE: {
4228 binder_inner_proc_unlock(proc);
4229 cmd = BR_TRANSACTION_COMPLETE;
4230 if (put_user(cmd, (uint32_t __user *)ptr))
4231 return -EFAULT;
4232 ptr += sizeof(uint32_t);
4233
4234 binder_stat_br(proc, thread, cmd);
4235 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4236 "%d:%d(%s:%s) BR_TRANSACTION_COMPLETE\n",
4237 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm);
4238 kfree(w);
4239 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4240 } break;
4241 case BINDER_WORK_NODE: {
4242 struct binder_node *node = container_of(w, struct binder_node, work);
4243 int strong, weak;
4244 binder_uintptr_t node_ptr = node->ptr;
4245 binder_uintptr_t node_cookie = node->cookie;
4246 int node_debug_id = node->debug_id;
4247 int has_weak_ref;
4248 int has_strong_ref;
4249 void __user *orig_ptr = ptr;
4250
4251 BUG_ON(proc != node->proc);
4252 strong = node->internal_strong_refs ||
4253 node->local_strong_refs;
4254 weak = !hlist_empty(&node->refs) ||
4255 node->local_weak_refs ||
4256 node->tmp_refs || strong;
4257 has_strong_ref = node->has_strong_ref;
4258 has_weak_ref = node->has_weak_ref;
4259
4260 if (weak && !has_weak_ref) {
4261 node->has_weak_ref = 1;
4262 node->pending_weak_ref = 1;
4263 node->local_weak_refs++;
4264 }
4265 if (strong && !has_strong_ref) {
4266 node->has_strong_ref = 1;
4267 node->pending_strong_ref = 1;
4268 node->local_strong_refs++;
4269 }
4270 if (!strong && has_strong_ref)
4271 node->has_strong_ref = 0;
4272 if (!weak && has_weak_ref)
4273 node->has_weak_ref = 0;
4274 if (!weak && !strong) {
4275 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4276 "%d:%d(%s:%s) node %d u%016llx c%016llx deleted\n",
4277 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
4278 node_debug_id,
4279 (u64)node_ptr,
4280 (u64)node_cookie);
4281 rb_erase(&node->rb_node, &proc->nodes);
4282 binder_inner_proc_unlock(proc);
4283 binder_node_lock(node);
4284 /*
4285 * Acquire the node lock before freeing the
4286 * node to serialize with other threads that
4287 * may have been holding the node lock while
4288 * decrementing this node (avoids race where
4289 * this thread frees while the other thread
4290 * is unlocking the node after the final
4291 * decrement)
4292 */
4293 binder_node_unlock(node);
4294 binder_free_node(node);
4295 } else
4296 binder_inner_proc_unlock(proc);
4297
4298 if (weak && !has_weak_ref)
4299 ret = binder_put_node_cmd(
4300 proc, thread, &ptr, node_ptr,
4301 node_cookie, node_debug_id,
4302 BR_INCREFS, "BR_INCREFS");
4303 if (!ret && strong && !has_strong_ref)
4304 ret = binder_put_node_cmd(
4305 proc, thread, &ptr, node_ptr,
4306 node_cookie, node_debug_id,
4307 BR_ACQUIRE, "BR_ACQUIRE");
4308 if (!ret && !strong && has_strong_ref)
4309 ret = binder_put_node_cmd(
4310 proc, thread, &ptr, node_ptr,
4311 node_cookie, node_debug_id,
4312 BR_RELEASE, "BR_RELEASE");
4313 if (!ret && !weak && has_weak_ref)
4314 ret = binder_put_node_cmd(
4315 proc, thread, &ptr, node_ptr,
4316 node_cookie, node_debug_id,
4317 BR_DECREFS, "BR_DECREFS");
4318 if (orig_ptr == ptr)
4319 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4320 "%d:%d(%s:%s) node %d u%016llx c%016llx state unchanged\n",
4321 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
4322 node_debug_id,
4323 (u64)node_ptr,
4324 (u64)node_cookie);
4325 if (ret)
4326 return ret;
4327 } break;
4328 case BINDER_WORK_DEAD_BINDER:
4329 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4330 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4331 struct binder_ref_death *death;
4332 uint32_t cmd;
4333 binder_uintptr_t cookie;
4334
4335 death = container_of(w, struct binder_ref_death, work);
4336 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4337 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4338 else
4339 cmd = BR_DEAD_BINDER;
4340 cookie = death->cookie;
4341
4342 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4343 "%d:%d(%s:%s) %s %016llx\n",
4344 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
4345 cmd == BR_DEAD_BINDER ?
4346 "BR_DEAD_BINDER" :
4347 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4348 (u64)cookie);
4349 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4350 binder_inner_proc_unlock(proc);
4351 kfree(death);
4352 binder_stats_deleted(BINDER_STAT_DEATH);
4353 } else {
4354 binder_enqueue_work_ilocked(
4355 w, &proc->delivered_death);
4356 binder_inner_proc_unlock(proc);
4357 }
4358 if (put_user(cmd, (uint32_t __user *)ptr))
4359 return -EFAULT;
4360 ptr += sizeof(uint32_t);
4361 if (put_user(cookie,
4362 (binder_uintptr_t __user *)ptr))
4363 return -EFAULT;
4364 ptr += sizeof(binder_uintptr_t);
4365 binder_stat_br(proc, thread, cmd);
4366 if (cmd == BR_DEAD_BINDER)
4367 goto done; /* DEAD_BINDER notifications can cause transactions */
4368 } break;
4369 }
4370
4371 if (!t)
4372 continue;
4373
4374 BUG_ON(t->buffer == NULL);
4375 if (t->buffer->target_node) {
4376 struct binder_node *target_node = t->buffer->target_node;
4377 struct binder_priority node_prio;
4378
4379 tr.target.ptr = target_node->ptr;
4380 tr.cookie = target_node->cookie;
4381 node_prio.sched_policy = target_node->sched_policy;
4382 node_prio.prio = target_node->min_priority;
4383 binder_transaction_priority(current, t, node_prio,
4384 target_node->inherit_rt);
4385 cmd = BR_TRANSACTION;
4386 } else {
4387 tr.target.ptr = 0;
4388 tr.cookie = 0;
4389 cmd = BR_REPLY;
4390 }
4391 tr.code = t->code;
4392 tr.flags = t->flags;
4393 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4394
4395 t_from = binder_get_txn_from(t);
4396 if (t_from) {
4397 struct task_struct *sender = t_from->proc->tsk;
4398
4399 tr.sender_pid = task_tgid_nr_ns(sender,
4400 task_active_pid_ns(current));
4401 } else {
4402 tr.sender_pid = 0;
4403 }
4404
4405 tr.data_size = t->buffer->data_size;
4406 tr.offsets_size = t->buffer->offsets_size;
4407 tr.data.ptr.buffer = (binder_uintptr_t)
4408 ((uintptr_t)t->buffer->data +
4409 binder_alloc_get_user_buffer_offset(&proc->alloc));
4410 tr.data.ptr.offsets = tr.data.ptr.buffer +
4411 ALIGN(t->buffer->data_size,
4412 sizeof(void *));
4413
4414 if (put_user(cmd, (uint32_t __user *)ptr)) {
4415 if (t_from)
4416 binder_thread_dec_tmpref(t_from);
4417
4418 binder_cleanup_transaction(t, "put_user failed",
4419 BR_FAILED_REPLY);
4420
4421 return -EFAULT;
4422 }
4423 ptr += sizeof(uint32_t);
4424 if (copy_to_user(ptr, &tr, sizeof(tr))) {
4425 if (t_from)
4426 binder_thread_dec_tmpref(t_from);
4427
4428 binder_cleanup_transaction(t, "copy_to_user failed",
4429 BR_FAILED_REPLY);
4430
4431 return -EFAULT;
4432 }
4433 ptr += sizeof(tr);
4434
4435 #ifdef CONFIG_DEBUG_SNAPSHOT_BINDER
4436 dss_binder_transaction_received(t, thread);
4437 #endif
4438 trace_binder_transaction_received(t);
4439 binder_stat_br(proc, thread, cmd);
4440 binder_debug(BINDER_DEBUG_TRANSACTION,
4441 "%d:%d(%s:%s) %s %d %d:%d(%s:%s), cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4442 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
4443 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4444 "BR_REPLY",
4445 t->debug_id, t_from ? t_from->proc->pid : 0,
4446 t_from ? t_from->pid : 0,
4447 t_from ? t_from->proc->tsk->comm : "",
4448 t_from ? t_from->task->comm : "",
4449 cmd,
4450 t->buffer->data_size, t->buffer->offsets_size,
4451 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
4452
4453 if (t_from)
4454 binder_thread_dec_tmpref(t_from);
4455 t->buffer->allow_user_free = 1;
4456 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
4457 binder_inner_proc_lock(thread->proc);
4458 t->to_parent = thread->transaction_stack;
4459 t->to_thread = thread;
4460 thread->transaction_stack = t;
4461 binder_inner_proc_unlock(thread->proc);
4462 } else {
4463 binder_free_transaction(t);
4464 }
4465 break;
4466 }
4467
4468 done:
4469
4470 *consumed = ptr - buffer;
4471 binder_inner_proc_lock(proc);
4472 if (proc->requested_threads == 0 &&
4473 list_empty(&thread->proc->waiting_threads) &&
4474 proc->requested_threads_started < proc->max_threads &&
4475 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4476 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
4477 /*spawn a new thread if we leave this out */) {
4478 proc->requested_threads++;
4479 binder_inner_proc_unlock(proc);
4480 binder_debug(BINDER_DEBUG_THREADS,
4481 "%d:%d(%s:%s) BR_SPAWN_LOOPER\n",
4482 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm);
4483 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4484 return -EFAULT;
4485 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4486 } else
4487 binder_inner_proc_unlock(proc);
4488 return 0;
4489 }
4490
4491 static void binder_release_work(struct binder_proc *proc,
4492 struct list_head *list)
4493 {
4494 struct binder_work *w;
4495
4496 while (1) {
4497 w = binder_dequeue_work_head(proc, list);
4498 if (!w)
4499 return;
4500
4501 switch (w->type) {
4502 case BINDER_WORK_TRANSACTION: {
4503 struct binder_transaction *t;
4504
4505 t = container_of(w, struct binder_transaction, work);
4506
4507 binder_cleanup_transaction(t, "process died.",
4508 BR_DEAD_REPLY);
4509 } break;
4510 case BINDER_WORK_RETURN_ERROR: {
4511 struct binder_error *e = container_of(
4512 w, struct binder_error, work);
4513
4514 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4515 "undelivered TRANSACTION_ERROR: %u\n",
4516 e->cmd);
4517 } break;
4518 case BINDER_WORK_TRANSACTION_COMPLETE: {
4519 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4520 "undelivered TRANSACTION_COMPLETE\n");
4521 kfree(w);
4522 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4523 } break;
4524 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4525 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4526 struct binder_ref_death *death;
4527
4528 death = container_of(w, struct binder_ref_death, work);
4529 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4530 "undelivered death notification, %016llx\n",
4531 (u64)death->cookie);
4532 kfree(death);
4533 binder_stats_deleted(BINDER_STAT_DEATH);
4534 } break;
4535 default:
4536 pr_err("unexpected work type, %d, not freed\n",
4537 w->type);
4538 break;
4539 }
4540 }
4541
4542 }
4543
4544 static struct binder_thread *binder_get_thread_ilocked(
4545 struct binder_proc *proc, struct binder_thread *new_thread)
4546 {
4547 struct binder_thread *thread = NULL;
4548 struct rb_node *parent = NULL;
4549 struct rb_node **p = &proc->threads.rb_node;
4550
4551 while (*p) {
4552 parent = *p;
4553 thread = rb_entry(parent, struct binder_thread, rb_node);
4554
4555 if (current->pid < thread->pid)
4556 p = &(*p)->rb_left;
4557 else if (current->pid > thread->pid)
4558 p = &(*p)->rb_right;
4559 else
4560 return thread;
4561 }
4562 if (!new_thread)
4563 return NULL;
4564 thread = new_thread;
4565 binder_stats_created(BINDER_STAT_THREAD);
4566 thread->proc = proc;
4567 thread->pid = current->pid;
4568 get_task_struct(current);
4569 thread->task = current;
4570 atomic_set(&thread->tmp_ref, 0);
4571 init_waitqueue_head(&thread->wait);
4572 INIT_LIST_HEAD(&thread->todo);
4573 rb_link_node(&thread->rb_node, parent, p);
4574 rb_insert_color(&thread->rb_node, &proc->threads);
4575 thread->looper_need_return = true;
4576 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4577 thread->return_error.cmd = BR_OK;
4578 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4579 thread->reply_error.cmd = BR_OK;
4580 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4581 return thread;
4582 }
4583
4584 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4585 {
4586 struct binder_thread *thread;
4587 struct binder_thread *new_thread;
4588
4589 binder_inner_proc_lock(proc);
4590 thread = binder_get_thread_ilocked(proc, NULL);
4591 binder_inner_proc_unlock(proc);
4592 if (!thread) {
4593 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4594 if (new_thread == NULL)
4595 return NULL;
4596 binder_inner_proc_lock(proc);
4597 thread = binder_get_thread_ilocked(proc, new_thread);
4598 binder_inner_proc_unlock(proc);
4599 if (thread != new_thread)
4600 kfree(new_thread);
4601 }
4602 return thread;
4603 }
4604
4605 static void binder_free_proc(struct binder_proc *proc)
4606 {
4607 BUG_ON(!list_empty(&proc->todo));
4608 BUG_ON(!list_empty(&proc->delivered_death));
4609 binder_alloc_deferred_release(&proc->alloc);
4610 put_task_struct(proc->tsk);
4611 binder_stats_deleted(BINDER_STAT_PROC);
4612 kfree(proc);
4613 }
4614
4615 static void binder_free_thread(struct binder_thread *thread)
4616 {
4617 BUG_ON(!list_empty(&thread->todo));
4618 binder_stats_deleted(BINDER_STAT_THREAD);
4619 binder_proc_dec_tmpref(thread->proc);
4620 put_task_struct(thread->task);
4621 kfree(thread);
4622 }
4623
4624 static int binder_thread_release(struct binder_proc *proc,
4625 struct binder_thread *thread)
4626 {
4627 struct binder_transaction *t;
4628 struct binder_transaction *send_reply = NULL;
4629 int active_transactions = 0;
4630 struct binder_transaction *last_t = NULL;
4631
4632 binder_inner_proc_lock(thread->proc);
4633 /*
4634 * take a ref on the proc so it survives
4635 * after we remove this thread from proc->threads.
4636 * The corresponding dec is when we actually
4637 * free the thread in binder_free_thread()
4638 */
4639 proc->tmp_ref++;
4640 /*
4641 * take a ref on this thread to ensure it
4642 * survives while we are releasing it
4643 */
4644 atomic_inc(&thread->tmp_ref);
4645 rb_erase(&thread->rb_node, &proc->threads);
4646 t = thread->transaction_stack;
4647 if (t) {
4648 spin_lock(&t->lock);
4649 if (t->to_thread == thread)
4650 send_reply = t;
4651 }
4652 thread->is_dead = true;
4653
4654 while (t) {
4655 last_t = t;
4656 active_transactions++;
4657 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4658 "release %d:%d(%s:%s) transaction %d %s, still active\n",
4659 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
4660 t->debug_id,
4661 (t->to_thread == thread) ? "in" : "out");
4662
4663 if (t->to_thread == thread) {
4664 t->to_proc = NULL;
4665 t->to_thread = NULL;
4666 if (t->buffer) {
4667 t->buffer->transaction = NULL;
4668 t->buffer = NULL;
4669 }
4670 t = t->to_parent;
4671 } else if (t->from == thread) {
4672 t->from = NULL;
4673 t = t->from_parent;
4674 } else
4675 BUG();
4676 spin_unlock(&last_t->lock);
4677 if (t)
4678 spin_lock(&t->lock);
4679 }
4680
4681 /*
4682 * If this thread used poll, make sure we remove the waitqueue
4683 * from any epoll data structures holding it with POLLFREE.
4684 * waitqueue_active() is safe to use here because we're holding
4685 * the inner lock.
4686 */
4687 if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
4688 waitqueue_active(&thread->wait)) {
4689 wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
4690 }
4691
4692 binder_inner_proc_unlock(thread->proc);
4693
4694 /*
4695 * This is needed to avoid races between wake_up_poll() above and
4696 * and ep_remove_waitqueue() called for other reasons (eg the epoll file
4697 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
4698 * lock, so we can be sure it's done after calling synchronize_rcu().
4699 */
4700 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4701 synchronize_rcu();
4702
4703 if (send_reply)
4704 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4705 binder_release_work(proc, &thread->todo);
4706 binder_thread_dec_tmpref(thread);
4707 return active_transactions;
4708 }
4709
4710 static unsigned int binder_poll(struct file *filp,
4711 struct poll_table_struct *wait)
4712 {
4713 struct binder_proc *proc = filp->private_data;
4714 struct binder_thread *thread = NULL;
4715 bool wait_for_proc_work;
4716
4717 thread = binder_get_thread(proc);
4718 if (!thread)
4719 return POLLERR;
4720
4721 binder_inner_proc_lock(thread->proc);
4722 thread->looper |= BINDER_LOOPER_STATE_POLL;
4723 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4724
4725 binder_inner_proc_unlock(thread->proc);
4726
4727 poll_wait(filp, &thread->wait, wait);
4728
4729 if (binder_has_work(thread, wait_for_proc_work))
4730 return POLLIN;
4731
4732 return 0;
4733 }
4734
4735 static int binder_ioctl_write_read(struct file *filp,
4736 unsigned int cmd, unsigned long arg,
4737 struct binder_thread *thread)
4738 {
4739 int ret = 0;
4740 struct binder_proc *proc = filp->private_data;
4741 unsigned int size = _IOC_SIZE(cmd);
4742 void __user *ubuf = (void __user *)arg;
4743 struct binder_write_read bwr;
4744
4745 if (size != sizeof(struct binder_write_read)) {
4746 ret = -EINVAL;
4747 goto out;
4748 }
4749 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4750 ret = -EFAULT;
4751 goto out;
4752 }
4753 binder_debug(BINDER_DEBUG_READ_WRITE,
4754 "%d:%d(%s:%s) write %lld at %016llx, read %lld at %016llx\n",
4755 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
4756 (u64)bwr.write_size, (u64)bwr.write_buffer,
4757 (u64)bwr.read_size, (u64)bwr.read_buffer);
4758
4759 if (bwr.write_size > 0) {
4760 ret = binder_thread_write(proc, thread,
4761 bwr.write_buffer,
4762 bwr.write_size,
4763 &bwr.write_consumed);
4764 trace_binder_write_done(ret);
4765 if (ret < 0) {
4766 bwr.read_consumed = 0;
4767 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4768 ret = -EFAULT;
4769 goto out;
4770 }
4771 }
4772 if (bwr.read_size > 0) {
4773 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4774 bwr.read_size,
4775 &bwr.read_consumed,
4776 filp->f_flags & O_NONBLOCK);
4777 trace_binder_read_done(ret);
4778 binder_inner_proc_lock(proc);
4779 if (!binder_worklist_empty_ilocked(&proc->todo))
4780 binder_wakeup_proc_ilocked(proc);
4781 binder_inner_proc_unlock(proc);
4782 if (ret < 0) {
4783 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4784 ret = -EFAULT;
4785 goto out;
4786 }
4787 }
4788 binder_debug(BINDER_DEBUG_READ_WRITE,
4789 "%d:%d(%s:%s) wrote %lld of %lld, read return %lld of %lld\n",
4790 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm,
4791 (u64)bwr.write_consumed, (u64)bwr.write_size,
4792 (u64)bwr.read_consumed, (u64)bwr.read_size);
4793 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4794 ret = -EFAULT;
4795 goto out;
4796 }
4797 out:
4798 return ret;
4799 }
4800
4801 static int binder_ioctl_set_ctx_mgr(struct file *filp)
4802 {
4803 int ret = 0;
4804 struct binder_proc *proc = filp->private_data;
4805 struct binder_context *context = proc->context;
4806 struct binder_node *new_node;
4807 kuid_t curr_euid = current_euid();
4808
4809 mutex_lock(&context->context_mgr_node_lock);
4810 if (context->binder_context_mgr_node) {
4811 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4812 ret = -EBUSY;
4813 goto out;
4814 }
4815 ret = security_binder_set_context_mgr(proc->tsk);
4816 if (ret < 0)
4817 goto out;
4818 if (uid_valid(context->binder_context_mgr_uid)) {
4819 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4820 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4821 from_kuid(&init_user_ns, curr_euid),
4822 from_kuid(&init_user_ns,
4823 context->binder_context_mgr_uid));
4824 ret = -EPERM;
4825 goto out;
4826 }
4827 } else {
4828 context->binder_context_mgr_uid = curr_euid;
4829 }
4830 new_node = binder_new_node(proc, NULL);
4831 if (!new_node) {
4832 ret = -ENOMEM;
4833 goto out;
4834 }
4835 binder_node_lock(new_node);
4836 new_node->local_weak_refs++;
4837 new_node->local_strong_refs++;
4838 new_node->has_strong_ref = 1;
4839 new_node->has_weak_ref = 1;
4840 context->binder_context_mgr_node = new_node;
4841 binder_node_unlock(new_node);
4842 binder_put_node(new_node);
4843 out:
4844 mutex_unlock(&context->context_mgr_node_lock);
4845 return ret;
4846 }
4847
4848 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4849 struct binder_node_debug_info *info)
4850 {
4851 struct rb_node *n;
4852 binder_uintptr_t ptr = info->ptr;
4853
4854 memset(info, 0, sizeof(*info));
4855
4856 binder_inner_proc_lock(proc);
4857 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4858 struct binder_node *node = rb_entry(n, struct binder_node,
4859 rb_node);
4860 if (node->ptr > ptr) {
4861 info->ptr = node->ptr;
4862 info->cookie = node->cookie;
4863 info->has_strong_ref = node->has_strong_ref;
4864 info->has_weak_ref = node->has_weak_ref;
4865 break;
4866 }
4867 }
4868 binder_inner_proc_unlock(proc);
4869
4870 return 0;
4871 }
4872
4873 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4874 {
4875 int ret;
4876 struct binder_proc *proc = filp->private_data;
4877 struct binder_thread *thread;
4878 unsigned int size = _IOC_SIZE(cmd);
4879 void __user *ubuf = (void __user *)arg;
4880
4881 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4882 proc->pid, current->pid, cmd, arg);*/
4883
4884 binder_selftest_alloc(&proc->alloc);
4885
4886 trace_binder_ioctl(cmd, arg);
4887
4888 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4889 if (ret)
4890 goto err_unlocked;
4891
4892 thread = binder_get_thread(proc);
4893 if (thread == NULL) {
4894 ret = -ENOMEM;
4895 goto err;
4896 }
4897
4898 switch (cmd) {
4899 case BINDER_WRITE_READ:
4900 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4901 if (ret)
4902 goto err;
4903 break;
4904 case BINDER_SET_MAX_THREADS: {
4905 int max_threads;
4906
4907 if (copy_from_user(&max_threads, ubuf,
4908 sizeof(max_threads))) {
4909 ret = -EINVAL;
4910 goto err;
4911 }
4912 binder_inner_proc_lock(proc);
4913 proc->max_threads = max_threads;
4914 binder_inner_proc_unlock(proc);
4915 break;
4916 }
4917 case BINDER_SET_CONTEXT_MGR:
4918 ret = binder_ioctl_set_ctx_mgr(filp);
4919 if (ret)
4920 goto err;
4921 break;
4922 case BINDER_THREAD_EXIT:
4923 binder_debug(BINDER_DEBUG_THREADS, "%d:%d(%s:%s) exit\n",
4924 proc->pid, thread->pid, proc->tsk->comm, thread->task->comm);
4925 binder_thread_release(proc, thread);
4926 thread = NULL;
4927 break;
4928 case BINDER_VERSION: {
4929 struct binder_version __user *ver = ubuf;
4930
4931 if (size != sizeof(struct binder_version)) {
4932 ret = -EINVAL;
4933 goto err;
4934 }
4935 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4936 &ver->protocol_version)) {
4937 ret = -EINVAL;
4938 goto err;
4939 }
4940 break;
4941 }
4942 case BINDER_GET_NODE_DEBUG_INFO: {
4943 struct binder_node_debug_info info;
4944
4945 if (copy_from_user(&info, ubuf, sizeof(info))) {
4946 ret = -EFAULT;
4947 goto err;
4948 }
4949
4950 ret = binder_ioctl_get_node_debug_info(proc, &info);
4951 if (ret < 0)
4952 goto err;
4953
4954 if (copy_to_user(ubuf, &info, sizeof(info))) {
4955 ret = -EFAULT;
4956 goto err;
4957 }
4958 break;
4959 }
4960 default:
4961 ret = -EINVAL;
4962 goto err;
4963 }
4964 ret = 0;
4965 err:
4966 if (thread)
4967 thread->looper_need_return = false;
4968 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4969 if (ret && ret != -ERESTARTSYS)
4970 pr_info("%d:%d(%s:%s) ioctl %x %lx returned %d\n", proc->pid, current->pid,
4971 proc->tsk->comm, current->comm, cmd, arg, ret);
4972 err_unlocked:
4973 trace_binder_ioctl_done(ret);
4974 return ret;
4975 }
4976
4977 static void binder_vma_open(struct vm_area_struct *vma)
4978 {
4979 struct binder_proc *proc = vma->vm_private_data;
4980
4981 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4982 "%d(%s) open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4983 proc->pid, proc->tsk->comm, vma->vm_start, vma->vm_end,
4984 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4985 (unsigned long)pgprot_val(vma->vm_page_prot));
4986 }
4987
4988 static void binder_vma_close(struct vm_area_struct *vma)
4989 {
4990 struct binder_proc *proc = vma->vm_private_data;
4991
4992 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4993 "%d(%s) close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4994 proc->pid, proc->tsk->comm, vma->vm_start, vma->vm_end,
4995 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4996 (unsigned long)pgprot_val(vma->vm_page_prot));
4997 binder_alloc_vma_close(&proc->alloc);
4998 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
4999 }
5000
5001 static int binder_vm_fault(struct vm_fault *vmf)
5002 {
5003 return VM_FAULT_SIGBUS;
5004 }
5005
5006 static const struct vm_operations_struct binder_vm_ops = {
5007 .open = binder_vma_open,
5008 .close = binder_vma_close,
5009 .fault = binder_vm_fault,
5010 };
5011
5012 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5013 {
5014 int ret;
5015 struct binder_proc *proc = filp->private_data;
5016 const char *failure_string;
5017
5018 if (proc->tsk != current->group_leader)
5019 return -EINVAL;
5020
5021 if ((vma->vm_end - vma->vm_start) > SZ_4M)
5022 vma->vm_end = vma->vm_start + SZ_4M;
5023
5024 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5025 "%s: %d(%s) %lx-%lx (%ld K) vma %lx pagep %lx\n",
5026 __func__, proc->pid, proc->tsk->comm, vma->vm_start, vma->vm_end,
5027 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5028 (unsigned long)pgprot_val(vma->vm_page_prot));
5029
5030 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5031 ret = -EPERM;
5032 failure_string = "bad vm_flags";
5033 goto err_bad_arg;
5034 }
5035 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
5036 vma->vm_ops = &binder_vm_ops;
5037 vma->vm_private_data = proc;
5038
5039 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
5040 if (ret) {
5041 failure_string = "";
5042 goto err_bad_arg;
5043 }
5044 mutex_lock(&proc->files_lock);
5045 proc->files = get_files_struct(current);
5046 mutex_unlock(&proc->files_lock);
5047 return 0;
5048
5049 err_bad_arg:
5050 pr_err("%s: %d(%s) %lx-%lx %s failed %d\n", __func__,
5051 proc->pid, proc->tsk->comm, vma->vm_start, vma->vm_end, failure_string, ret);
5052 return ret;
5053 }
5054
5055 static int binder_open(struct inode *nodp, struct file *filp)
5056 {
5057 struct binder_proc *proc;
5058 struct binder_device *binder_dev;
5059
5060 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d (%s:%s)\n", __func__,
5061 current->group_leader->pid, current->pid,
5062 current->group_leader->comm, current->comm);
5063
5064 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5065 if (proc == NULL)
5066 return -ENOMEM;
5067 spin_lock_init(&proc->inner_lock);
5068 spin_lock_init(&proc->outer_lock);
5069 get_task_struct(current->group_leader);
5070 proc->tsk = current->group_leader;
5071 mutex_init(&proc->files_lock);
5072 INIT_LIST_HEAD(&proc->todo);
5073 if (binder_supported_policy(current->policy)) {
5074 proc->default_priority.sched_policy = current->policy;
5075 proc->default_priority.prio = current->normal_prio;
5076 } else {
5077 proc->default_priority.sched_policy = SCHED_NORMAL;
5078 proc->default_priority.prio = NICE_TO_PRIO(0);
5079 }
5080
5081 binder_dev = container_of(filp->private_data, struct binder_device,
5082 miscdev);
5083 proc->context = &binder_dev->context;
5084 binder_alloc_init(&proc->alloc);
5085
5086 binder_stats_created(BINDER_STAT_PROC);
5087 proc->pid = current->group_leader->pid;
5088 INIT_LIST_HEAD(&proc->delivered_death);
5089 INIT_LIST_HEAD(&proc->waiting_threads);
5090 filp->private_data = proc;
5091
5092 mutex_lock(&binder_procs_lock);
5093 hlist_add_head(&proc->proc_node, &binder_procs);
5094 mutex_unlock(&binder_procs_lock);
5095
5096 if (binder_debugfs_dir_entry_proc) {
5097 char strbuf[11];
5098
5099 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5100 /*
5101 * proc debug entries are shared between contexts, so
5102 * this will fail if the process tries to open the driver
5103 * again with a different context. The priting code will
5104 * anyway print all contexts that a given PID has, so this
5105 * is not a problem.
5106 */
5107 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
5108 binder_debugfs_dir_entry_proc,
5109 (void *)(unsigned long)proc->pid,
5110 &binder_proc_fops);
5111 }
5112
5113 return 0;
5114 }
5115
5116 static int binder_flush(struct file *filp, fl_owner_t id)
5117 {
5118 struct binder_proc *proc = filp->private_data;
5119
5120 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5121
5122 return 0;
5123 }
5124
5125 static void binder_deferred_flush(struct binder_proc *proc)
5126 {
5127 struct rb_node *n;
5128 int wake_count = 0;
5129
5130 binder_inner_proc_lock(proc);
5131 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5132 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5133
5134 thread->looper_need_return = true;
5135 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5136 wake_up_interruptible(&thread->wait);
5137 wake_count++;
5138 }
5139 }
5140 binder_inner_proc_unlock(proc);
5141
5142 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5143 "binder_flush: %d(%s) woke %d threads\n", proc->pid, proc->tsk->comm,
5144 wake_count);
5145 }
5146
5147 static int binder_release(struct inode *nodp, struct file *filp)
5148 {
5149 struct binder_proc *proc = filp->private_data;
5150
5151 debugfs_remove(proc->debugfs_entry);
5152 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5153
5154 return 0;
5155 }
5156
5157 static int binder_node_release(struct binder_node *node, int refs)
5158 {
5159 struct binder_ref *ref;
5160 int death = 0;
5161 struct binder_proc *proc = node->proc;
5162
5163 binder_release_work(proc, &node->async_todo);
5164
5165 binder_node_lock(node);
5166 binder_inner_proc_lock(proc);
5167 binder_dequeue_work_ilocked(&node->work);
5168 /*
5169 * The caller must have taken a temporary ref on the node,
5170 */
5171 BUG_ON(!node->tmp_refs);
5172 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5173 binder_inner_proc_unlock(proc);
5174 binder_node_unlock(node);
5175 binder_free_node(node);
5176
5177 return refs;
5178 }
5179
5180 node->proc = NULL;
5181 node->local_strong_refs = 0;
5182 node->local_weak_refs = 0;
5183 binder_inner_proc_unlock(proc);
5184
5185 spin_lock(&binder_dead_nodes_lock);
5186 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5187 spin_unlock(&binder_dead_nodes_lock);
5188
5189 hlist_for_each_entry(ref, &node->refs, node_entry) {
5190 refs++;
5191 /*
5192 * Need the node lock to synchronize
5193 * with new notification requests and the
5194 * inner lock to synchronize with queued
5195 * death notifications.
5196 */
5197 binder_inner_proc_lock(ref->proc);
5198 if (!ref->death) {
5199 binder_inner_proc_unlock(ref->proc);
5200 continue;
5201 }
5202
5203 death++;
5204
5205 BUG_ON(!list_empty(&ref->death->work.entry));
5206 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5207 binder_enqueue_work_ilocked(&ref->death->work,
5208 &ref->proc->todo);
5209 binder_wakeup_proc_ilocked(ref->proc);
5210 binder_inner_proc_unlock(ref->proc);
5211 }
5212
5213 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5214 "node %d now dead, refs %d, death %d\n",
5215 node->debug_id, refs, death);
5216 binder_node_unlock(node);
5217 binder_put_node(node);
5218
5219 return refs;
5220 }
5221
5222 static void binder_deferred_release(struct binder_proc *proc)
5223 {
5224 struct binder_context *context = proc->context;
5225 struct rb_node *n;
5226 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5227
5228 BUG_ON(proc->files);
5229
5230 mutex_lock(&binder_procs_lock);
5231 hlist_del(&proc->proc_node);
5232 mutex_unlock(&binder_procs_lock);
5233
5234 mutex_lock(&context->context_mgr_node_lock);
5235 if (context->binder_context_mgr_node &&
5236 context->binder_context_mgr_node->proc == proc) {
5237 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5238 "%s: %d(%s) context_mgr_node gone\n",
5239 __func__, proc->pid, proc->tsk->comm);
5240 context->binder_context_mgr_node = NULL;
5241 }
5242 mutex_unlock(&context->context_mgr_node_lock);
5243 binder_inner_proc_lock(proc);
5244 /*
5245 * Make sure proc stays alive after we
5246 * remove all the threads
5247 */
5248 proc->tmp_ref++;
5249
5250 proc->is_dead = true;
5251 threads = 0;
5252 active_transactions = 0;
5253 while ((n = rb_first(&proc->threads))) {
5254 struct binder_thread *thread;
5255
5256 thread = rb_entry(n, struct binder_thread, rb_node);
5257 binder_inner_proc_unlock(proc);
5258 threads++;
5259 active_transactions += binder_thread_release(proc, thread);
5260 binder_inner_proc_lock(proc);
5261 }
5262
5263 nodes = 0;
5264 incoming_refs = 0;
5265 while ((n = rb_first(&proc->nodes))) {
5266 struct binder_node *node;
5267
5268 node = rb_entry(n, struct binder_node, rb_node);
5269 nodes++;
5270 /*
5271 * take a temporary ref on the node before
5272 * calling binder_node_release() which will either
5273 * kfree() the node or call binder_put_node()
5274 */
5275 binder_inc_node_tmpref_ilocked(node);
5276 rb_erase(&node->rb_node, &proc->nodes);
5277 binder_inner_proc_unlock(proc);
5278 incoming_refs = binder_node_release(node, incoming_refs);
5279 binder_inner_proc_lock(proc);
5280 }
5281 binder_inner_proc_unlock(proc);
5282
5283 outgoing_refs = 0;
5284 binder_proc_lock(proc);
5285 while ((n = rb_first(&proc->refs_by_desc))) {
5286 struct binder_ref *ref;
5287
5288 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5289 outgoing_refs++;
5290 binder_cleanup_ref_olocked(ref);
5291 binder_proc_unlock(proc);
5292 binder_free_ref(ref);
5293 binder_proc_lock(proc);
5294 }
5295 binder_proc_unlock(proc);
5296
5297 binder_release_work(proc, &proc->todo);
5298 binder_release_work(proc, &proc->delivered_death);
5299
5300 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5301 "%s: %d(%s) threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5302 __func__, proc->pid, proc->tsk->comm, threads, nodes, incoming_refs,
5303 outgoing_refs, active_transactions);
5304
5305 binder_proc_dec_tmpref(proc);
5306 }
5307
5308 static void binder_deferred_func(struct work_struct *work)
5309 {
5310 struct binder_proc *proc;
5311 struct files_struct *files;
5312
5313 int defer;
5314
5315 do {
5316 mutex_lock(&binder_deferred_lock);
5317 if (!hlist_empty(&binder_deferred_list)) {
5318 proc = hlist_entry(binder_deferred_list.first,
5319 struct binder_proc, deferred_work_node);
5320 hlist_del_init(&proc->deferred_work_node);
5321 defer = proc->deferred_work;
5322 proc->deferred_work = 0;
5323 } else {
5324 proc = NULL;
5325 defer = 0;
5326 }
5327 mutex_unlock(&binder_deferred_lock);
5328
5329 files = NULL;
5330 if (defer & BINDER_DEFERRED_PUT_FILES) {
5331 mutex_lock(&proc->files_lock);
5332 files = proc->files;
5333 if (files)
5334 proc->files = NULL;
5335 mutex_unlock(&proc->files_lock);
5336 }
5337
5338 if (defer & BINDER_DEFERRED_FLUSH)
5339 binder_deferred_flush(proc);
5340
5341 if (defer & BINDER_DEFERRED_RELEASE)
5342 binder_deferred_release(proc); /* frees proc */
5343
5344 if (files)
5345 put_files_struct(files);
5346 } while (proc);
5347 }
5348 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5349
5350 static void
5351 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5352 {
5353 mutex_lock(&binder_deferred_lock);
5354 proc->deferred_work |= defer;
5355 if (hlist_unhashed(&proc->deferred_work_node)) {
5356 hlist_add_head(&proc->deferred_work_node,
5357 &binder_deferred_list);
5358 schedule_work(&binder_deferred_work);
5359 }
5360 mutex_unlock(&binder_deferred_lock);
5361 }
5362
5363 static void print_binder_transaction_ilocked(struct seq_file *m,
5364 struct binder_proc *proc,
5365 const char *prefix,
5366 struct binder_transaction *t)
5367 {
5368 struct binder_proc *to_proc;
5369 struct binder_buffer *buffer = t->buffer;
5370
5371 spin_lock(&t->lock);
5372 to_proc = t->to_proc;
5373 seq_printf(m,
5374 "%s %d: %pK from %d:%d(%s:%s) to %d:%d(%s:%s) code %x flags %x pri %d:%d r%d",
5375 prefix, t->debug_id, t,
5376 t->from ? t->from->proc->pid : 0,
5377 t->from ? t->from->pid : 0,
5378 t->from ? t->from->proc->tsk->comm : "",
5379 t->from ? t->from->task->comm : "",
5380 to_proc ? to_proc->pid : 0,
5381 t->to_thread ? t->to_thread->pid : 0,
5382 to_proc ? to_proc->tsk->comm : "",
5383 t->to_thread ? t->to_thread->task->comm : "",
5384 t->code, t->flags, t->priority.sched_policy,
5385 t->priority.prio, t->need_reply);
5386 spin_unlock(&t->lock);
5387
5388 if (proc != to_proc) {
5389 /*
5390 * Can only safely deref buffer if we are holding the
5391 * correct proc inner lock for this node
5392 */
5393 seq_puts(m, "\n");
5394 return;
5395 }
5396
5397 if (buffer == NULL) {
5398 seq_puts(m, " buffer free\n");
5399 return;
5400 }
5401 if (buffer->target_node)
5402 seq_printf(m, " node %d", buffer->target_node->debug_id);
5403 seq_printf(m, " size %zd:%zd data %pK\n",
5404 buffer->data_size, buffer->offsets_size,
5405 buffer->data);
5406 }
5407
5408 static void print_binder_work_ilocked(struct seq_file *m,
5409 struct binder_proc *proc,
5410 const char *prefix,
5411 const char *transaction_prefix,
5412 struct binder_work *w)
5413 {
5414 struct binder_node *node;
5415 struct binder_transaction *t;
5416
5417 switch (w->type) {
5418 case BINDER_WORK_TRANSACTION:
5419 t = container_of(w, struct binder_transaction, work);
5420 print_binder_transaction_ilocked(
5421 m, proc, transaction_prefix, t);
5422 break;
5423 case BINDER_WORK_RETURN_ERROR: {
5424 struct binder_error *e = container_of(
5425 w, struct binder_error, work);
5426
5427 seq_printf(m, "%stransaction error: %u\n",
5428 prefix, e->cmd);
5429 } break;
5430 case BINDER_WORK_TRANSACTION_COMPLETE:
5431 seq_printf(m, "%stransaction complete\n", prefix);
5432 break;
5433 case BINDER_WORK_NODE:
5434 node = container_of(w, struct binder_node, work);
5435 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5436 prefix, node->debug_id,
5437 (u64)node->ptr, (u64)node->cookie);
5438 break;
5439 case BINDER_WORK_DEAD_BINDER:
5440 seq_printf(m, "%shas dead binder\n", prefix);
5441 break;
5442 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5443 seq_printf(m, "%shas cleared dead binder\n", prefix);
5444 break;
5445 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5446 seq_printf(m, "%shas cleared death notification\n", prefix);
5447 break;
5448 default:
5449 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5450 break;
5451 }
5452 }
5453
5454 static void print_binder_thread_ilocked(struct seq_file *m,
5455 struct binder_thread *thread,
5456 int print_always)
5457 {
5458 struct binder_transaction *t;
5459 struct binder_work *w;
5460 size_t start_pos = m->count;
5461 size_t header_pos;
5462
5463 seq_printf(m, " thread %d %s: l %02x need_return %d tr %d\n",
5464 thread->pid, thread->task->comm, thread->looper,
5465 thread->looper_need_return,
5466 atomic_read(&thread->tmp_ref));
5467 header_pos = m->count;
5468 t = thread->transaction_stack;
5469 while (t) {
5470 if (t->from == thread) {
5471 print_binder_transaction_ilocked(m, thread->proc,
5472 " outgoing transaction", t);
5473 t = t->from_parent;
5474 } else if (t->to_thread == thread) {
5475 print_binder_transaction_ilocked(m, thread->proc,
5476 " incoming transaction", t);
5477 t = t->to_parent;
5478 } else {
5479 print_binder_transaction_ilocked(m, thread->proc,
5480 " bad transaction", t);
5481 t = NULL;
5482 }
5483 }
5484 list_for_each_entry(w, &thread->todo, entry) {
5485 print_binder_work_ilocked(m, thread->proc, " ",
5486 " pending transaction", w);
5487 }
5488 if (!print_always && m->count == header_pos)
5489 m->count = start_pos;
5490 }
5491
5492 static void print_binder_node_nilocked(struct seq_file *m,
5493 struct binder_node *node)
5494 {
5495 struct binder_ref *ref;
5496 struct binder_work *w;
5497 int count;
5498
5499 count = 0;
5500 hlist_for_each_entry(ref, &node->refs, node_entry)
5501 count++;
5502
5503 seq_printf(m, " node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5504 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5505 node->sched_policy, node->min_priority,
5506 node->has_strong_ref, node->has_weak_ref,
5507 node->local_strong_refs, node->local_weak_refs,
5508 node->internal_strong_refs, count, node->tmp_refs);
5509 if (count) {
5510 seq_puts(m, " proc");
5511 hlist_for_each_entry(ref, &node->refs, node_entry)
5512 seq_printf(m, " %d %s", ref->proc->pid, ref->proc->tsk->comm);
5513 }
5514 seq_puts(m, "\n");
5515 if (node->proc) {
5516 list_for_each_entry(w, &node->async_todo, entry)
5517 print_binder_work_ilocked(m, node->proc, " ",
5518 " pending async transaction", w);
5519 }
5520 }
5521
5522 static void print_binder_ref_olocked(struct seq_file *m,
5523 struct binder_ref *ref)
5524 {
5525 binder_node_lock(ref->node);
5526 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5527 ref->data.debug_id, ref->data.desc,
5528 ref->node->proc ? "" : "dead ",
5529 ref->node->debug_id, ref->data.strong,
5530 ref->data.weak, ref->death);
5531 binder_node_unlock(ref->node);
5532 }
5533
5534 static void print_binder_proc(struct seq_file *m,
5535 struct binder_proc *proc, int print_all)
5536 {
5537 struct binder_work *w;
5538 struct rb_node *n;
5539 size_t start_pos = m->count;
5540 size_t header_pos;
5541 struct binder_node *last_node = NULL;
5542
5543 seq_printf(m, "proc %d %s\n", proc->pid, proc->tsk->comm);
5544 seq_printf(m, "context %s\n", proc->context->name);
5545 header_pos = m->count;
5546
5547 binder_inner_proc_lock(proc);
5548 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5549 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5550 rb_node), print_all);
5551
5552 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5553 struct binder_node *node = rb_entry(n, struct binder_node,
5554 rb_node);
5555 /*
5556 * take a temporary reference on the node so it
5557 * survives and isn't removed from the tree
5558 * while we print it.
5559 */
5560 binder_inc_node_tmpref_ilocked(node);
5561 /* Need to drop inner lock to take node lock */
5562 binder_inner_proc_unlock(proc);
5563 if (last_node)
5564 binder_put_node(last_node);
5565 binder_node_inner_lock(node);
5566 print_binder_node_nilocked(m, node);
5567 binder_node_inner_unlock(node);
5568 last_node = node;
5569 binder_inner_proc_lock(proc);
5570 }
5571 binder_inner_proc_unlock(proc);
5572 if (last_node)
5573 binder_put_node(last_node);
5574
5575 if (print_all) {
5576 binder_proc_lock(proc);
5577 for (n = rb_first(&proc->refs_by_desc);
5578 n != NULL;
5579 n = rb_next(n))
5580 print_binder_ref_olocked(m, rb_entry(n,
5581 struct binder_ref,
5582 rb_node_desc));
5583 binder_proc_unlock(proc);
5584 }
5585 binder_alloc_print_allocated(m, &proc->alloc);
5586 binder_inner_proc_lock(proc);
5587 list_for_each_entry(w, &proc->todo, entry)
5588 print_binder_work_ilocked(m, proc, " ",
5589 " pending transaction", w);
5590 list_for_each_entry(w, &proc->delivered_death, entry) {
5591 seq_puts(m, " has delivered dead binder\n");
5592 break;
5593 }
5594 binder_inner_proc_unlock(proc);
5595 if (!print_all && m->count == header_pos)
5596 m->count = start_pos;
5597 }
5598
5599 static const char * const binder_return_strings[] = {
5600 "BR_ERROR",
5601 "BR_OK",
5602 "BR_TRANSACTION",
5603 "BR_REPLY",
5604 "BR_ACQUIRE_RESULT",
5605 "BR_DEAD_REPLY",
5606 "BR_TRANSACTION_COMPLETE",
5607 "BR_INCREFS",
5608 "BR_ACQUIRE",
5609 "BR_RELEASE",
5610 "BR_DECREFS",
5611 "BR_ATTEMPT_ACQUIRE",
5612 "BR_NOOP",
5613 "BR_SPAWN_LOOPER",
5614 "BR_FINISHED",
5615 "BR_DEAD_BINDER",
5616 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5617 "BR_FAILED_REPLY"
5618 };
5619
5620 static const char * const binder_command_strings[] = {
5621 "BC_TRANSACTION",
5622 "BC_REPLY",
5623 "BC_ACQUIRE_RESULT",
5624 "BC_FREE_BUFFER",
5625 "BC_INCREFS",
5626 "BC_ACQUIRE",
5627 "BC_RELEASE",
5628 "BC_DECREFS",
5629 "BC_INCREFS_DONE",
5630 "BC_ACQUIRE_DONE",
5631 "BC_ATTEMPT_ACQUIRE",
5632 "BC_REGISTER_LOOPER",
5633 "BC_ENTER_LOOPER",
5634 "BC_EXIT_LOOPER",
5635 "BC_REQUEST_DEATH_NOTIFICATION",
5636 "BC_CLEAR_DEATH_NOTIFICATION",
5637 "BC_DEAD_BINDER_DONE",
5638 "BC_TRANSACTION_SG",
5639 "BC_REPLY_SG",
5640 };
5641
5642 static const char * const binder_objstat_strings[] = {
5643 "proc",
5644 "thread",
5645 "node",
5646 "ref",
5647 "death",
5648 "transaction",
5649 "transaction_complete"
5650 };
5651
5652 static void print_binder_stats(struct seq_file *m, const char *prefix,
5653 struct binder_stats *stats)
5654 {
5655 int i;
5656
5657 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5658 ARRAY_SIZE(binder_command_strings));
5659 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5660 int temp = atomic_read(&stats->bc[i]);
5661
5662 if (temp)
5663 seq_printf(m, "%s%s: %d\n", prefix,
5664 binder_command_strings[i], temp);
5665 }
5666
5667 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5668 ARRAY_SIZE(binder_return_strings));
5669 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5670 int temp = atomic_read(&stats->br[i]);
5671
5672 if (temp)
5673 seq_printf(m, "%s%s: %d\n", prefix,
5674 binder_return_strings[i], temp);
5675 }
5676
5677 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5678 ARRAY_SIZE(binder_objstat_strings));
5679 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5680 ARRAY_SIZE(stats->obj_deleted));
5681 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5682 int created = atomic_read(&stats->obj_created[i]);
5683 int deleted = atomic_read(&stats->obj_deleted[i]);
5684
5685 if (created || deleted)
5686 seq_printf(m, "%s%s: active %d total %d\n",
5687 prefix,
5688 binder_objstat_strings[i],
5689 created - deleted,
5690 created);
5691 }
5692 }
5693
5694 static void print_binder_proc_stats(struct seq_file *m,
5695 struct binder_proc *proc)
5696 {
5697 struct binder_work *w;
5698 struct binder_thread *thread;
5699 struct rb_node *n;
5700 int count, strong, weak, ready_threads;
5701 size_t free_async_space =
5702 binder_alloc_get_free_async_space(&proc->alloc);
5703
5704 seq_printf(m, "proc %d %s\n", proc->pid, proc->tsk->comm);
5705 seq_printf(m, "context %s\n", proc->context->name);
5706 count = 0;
5707 ready_threads = 0;
5708 binder_inner_proc_lock(proc);
5709 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5710 count++;
5711
5712 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5713 ready_threads++;
5714
5715 seq_printf(m, " threads: %d\n", count);
5716 seq_printf(m, " requested threads: %d+%d/%d\n"
5717 " ready threads %d\n"
5718 " free async space %zd\n", proc->requested_threads,
5719 proc->requested_threads_started, proc->max_threads,
5720 ready_threads,
5721 free_async_space);
5722 count = 0;
5723 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5724 count++;
5725 binder_inner_proc_unlock(proc);
5726 seq_printf(m, " nodes: %d\n", count);
5727 count = 0;
5728 strong = 0;
5729 weak = 0;
5730 binder_proc_lock(proc);
5731 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5732 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5733 rb_node_desc);
5734 count++;
5735 strong += ref->data.strong;
5736 weak += ref->data.weak;
5737 }
5738 binder_proc_unlock(proc);
5739 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
5740
5741 count = binder_alloc_get_allocated_count(&proc->alloc);
5742 seq_printf(m, " buffers: %d\n", count);
5743
5744 binder_alloc_print_pages(m, &proc->alloc);
5745
5746 count = 0;
5747 binder_inner_proc_lock(proc);
5748 list_for_each_entry(w, &proc->todo, entry) {
5749 if (w->type == BINDER_WORK_TRANSACTION)
5750 count++;
5751 }
5752 binder_inner_proc_unlock(proc);
5753 seq_printf(m, " pending transactions: %d\n", count);
5754
5755 print_binder_stats(m, " ", &proc->stats);
5756 }
5757
5758
5759 static int binder_state_show(struct seq_file *m, void *unused)
5760 {
5761 struct binder_proc *proc;
5762 struct binder_node *node;
5763 struct binder_node *last_node = NULL;
5764
5765 seq_puts(m, "binder state:\n");
5766
5767 spin_lock(&binder_dead_nodes_lock);
5768 if (!hlist_empty(&binder_dead_nodes))
5769 seq_puts(m, "dead nodes:\n");
5770 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5771 /*
5772 * take a temporary reference on the node so it
5773 * survives and isn't removed from the list
5774 * while we print it.
5775 */
5776 node->tmp_refs++;
5777 spin_unlock(&binder_dead_nodes_lock);
5778 if (last_node)
5779 binder_put_node(last_node);
5780 binder_node_lock(node);
5781 print_binder_node_nilocked(m, node);
5782 binder_node_unlock(node);
5783 last_node = node;
5784 spin_lock(&binder_dead_nodes_lock);
5785 }
5786 spin_unlock(&binder_dead_nodes_lock);
5787 if (last_node)
5788 binder_put_node(last_node);
5789
5790 mutex_lock(&binder_procs_lock);
5791 hlist_for_each_entry(proc, &binder_procs, proc_node)
5792 print_binder_proc(m, proc, 1);
5793 mutex_unlock(&binder_procs_lock);
5794
5795 return 0;
5796 }
5797
5798 static int binder_stats_show(struct seq_file *m, void *unused)
5799 {
5800 struct binder_proc *proc;
5801
5802 seq_puts(m, "binder stats:\n");
5803
5804 print_binder_stats(m, "", &binder_stats);
5805
5806 mutex_lock(&binder_procs_lock);
5807 hlist_for_each_entry(proc, &binder_procs, proc_node)
5808 print_binder_proc_stats(m, proc);
5809 mutex_unlock(&binder_procs_lock);
5810
5811 return 0;
5812 }
5813
5814 static int binder_transactions_show(struct seq_file *m, void *unused)
5815 {
5816 struct binder_proc *proc;
5817
5818 seq_puts(m, "binder transactions:\n");
5819 mutex_lock(&binder_procs_lock);
5820 hlist_for_each_entry(proc, &binder_procs, proc_node)
5821 print_binder_proc(m, proc, 0);
5822 mutex_unlock(&binder_procs_lock);
5823
5824 return 0;
5825 }
5826
5827 static int binder_proc_show(struct seq_file *m, void *unused)
5828 {
5829 struct binder_proc *itr;
5830 int pid = (unsigned long)m->private;
5831
5832 mutex_lock(&binder_procs_lock);
5833 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5834 if (itr->pid == pid) {
5835 seq_puts(m, "binder proc state:\n");
5836 print_binder_proc(m, itr, 1);
5837 }
5838 }
5839 mutex_unlock(&binder_procs_lock);
5840
5841 return 0;
5842 }
5843
5844 static void print_binder_transaction_log_entry(struct seq_file *m,
5845 struct binder_transaction_log_entry *e)
5846 {
5847 int debug_id = READ_ONCE(e->debug_id_done);
5848 /*
5849 * read barrier to guarantee debug_id_done read before
5850 * we print the log values
5851 */
5852 smp_rmb();
5853 seq_printf(m,
5854 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5855 e->debug_id, (e->call_type == 2) ? "reply" :
5856 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5857 e->from_thread, e->to_proc, e->to_thread, e->context_name,
5858 e->to_node, e->target_handle, e->data_size, e->offsets_size,
5859 e->return_error, e->return_error_param,
5860 e->return_error_line);
5861 /*
5862 * read-barrier to guarantee read of debug_id_done after
5863 * done printing the fields of the entry
5864 */
5865 smp_rmb();
5866 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5867 "\n" : " (incomplete)\n");
5868 }
5869
5870 static int binder_transaction_log_show(struct seq_file *m, void *unused)
5871 {
5872 struct binder_transaction_log *log = m->private;
5873 unsigned int log_cur = atomic_read(&log->cur);
5874 unsigned int count;
5875 unsigned int cur;
5876 int i;
5877
5878 count = log_cur + 1;
5879 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5880 0 : count % ARRAY_SIZE(log->entry);
5881 if (count > ARRAY_SIZE(log->entry) || log->full)
5882 count = ARRAY_SIZE(log->entry);
5883 for (i = 0; i < count; i++) {
5884 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5885
5886 print_binder_transaction_log_entry(m, &log->entry[index]);
5887 }
5888 return 0;
5889 }
5890
5891 static const struct file_operations binder_fops = {
5892 .owner = THIS_MODULE,
5893 .poll = binder_poll,
5894 .unlocked_ioctl = binder_ioctl,
5895 .compat_ioctl = binder_ioctl,
5896 .mmap = binder_mmap,
5897 .open = binder_open,
5898 .flush = binder_flush,
5899 .release = binder_release,
5900 };
5901
5902 BINDER_DEBUG_ENTRY(state);
5903 BINDER_DEBUG_ENTRY(stats);
5904 BINDER_DEBUG_ENTRY(transactions);
5905 BINDER_DEBUG_ENTRY(transaction_log);
5906
5907 static int __init init_binder_device(const char *name)
5908 {
5909 int ret;
5910 struct binder_device *binder_device;
5911
5912 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5913 if (!binder_device)
5914 return -ENOMEM;
5915
5916 binder_device->miscdev.fops = &binder_fops;
5917 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5918 binder_device->miscdev.name = name;
5919
5920 binder_device->context.binder_context_mgr_uid = INVALID_UID;
5921 binder_device->context.name = name;
5922 mutex_init(&binder_device->context.context_mgr_node_lock);
5923
5924 ret = misc_register(&binder_device->miscdev);
5925 if (ret < 0) {
5926 kfree(binder_device);
5927 return ret;
5928 }
5929
5930 hlist_add_head(&binder_device->hlist, &binder_devices);
5931
5932 return ret;
5933 }
5934
5935 static int __init binder_init(void)
5936 {
5937 int ret;
5938 char *device_name, *device_names, *device_tmp;
5939 struct binder_device *device;
5940 struct hlist_node *tmp;
5941
5942 binder_alloc_shrinker_init();
5943
5944 atomic_set(&binder_transaction_log.cur, ~0U);
5945 atomic_set(&binder_transaction_log_failed.cur, ~0U);
5946
5947 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5948 if (binder_debugfs_dir_entry_root)
5949 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5950 binder_debugfs_dir_entry_root);
5951
5952 if (binder_debugfs_dir_entry_root) {
5953 debugfs_create_file("state",
5954 S_IRUGO,
5955 binder_debugfs_dir_entry_root,
5956 NULL,
5957 &binder_state_fops);
5958 debugfs_create_file("stats",
5959 S_IRUGO,
5960 binder_debugfs_dir_entry_root,
5961 NULL,
5962 &binder_stats_fops);
5963 debugfs_create_file("transactions",
5964 S_IRUGO,
5965 binder_debugfs_dir_entry_root,
5966 NULL,
5967 &binder_transactions_fops);
5968 debugfs_create_file("transaction_log",
5969 S_IRUGO,
5970 binder_debugfs_dir_entry_root,
5971 &binder_transaction_log,
5972 &binder_transaction_log_fops);
5973 debugfs_create_file("failed_transaction_log",
5974 S_IRUGO,
5975 binder_debugfs_dir_entry_root,
5976 &binder_transaction_log_failed,
5977 &binder_transaction_log_fops);
5978 }
5979
5980 /*
5981 * Copy the module_parameter string, because we don't want to
5982 * tokenize it in-place.
5983 */
5984 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
5985 if (!device_names) {
5986 ret = -ENOMEM;
5987 goto err_alloc_device_names_failed;
5988 }
5989 strcpy(device_names, binder_devices_param);
5990
5991 device_tmp = device_names;
5992 while ((device_name = strsep(&device_tmp, ","))) {
5993 ret = init_binder_device(device_name);
5994 if (ret)
5995 goto err_init_binder_device_failed;
5996 }
5997
5998 return ret;
5999
6000 err_init_binder_device_failed:
6001 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6002 misc_deregister(&device->miscdev);
6003 hlist_del(&device->hlist);
6004 kfree(device);
6005 }
6006
6007 kfree(device_names);
6008
6009 err_alloc_device_names_failed:
6010 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6011
6012 return ret;
6013 }
6014
6015 device_initcall(binder_init);
6016
6017 #define CREATE_TRACE_POINTS
6018 #include "binder_trace.h"
6019
6020 MODULE_LICENSE("GPL v2");