drivers/android/binder.c (exynos8895/android_kernel_samsung_universal8895, commit ba8872fc9211d61e677f27ef34904786c587bbee)
1 /* binder.c
2 *
3 * Android IPC Subsystem
4 *
5 * Copyright (C) 2007-2008 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18 /*
19 * Locking overview
20 *
21 * There are 3 main spinlocks which must be acquired in the
22 * order shown:
23 *
24 * 1) proc->outer_lock : protects binder_ref
25 * binder_proc_lock() and binder_proc_unlock() are
26 * used to acq/rel.
27 * 2) node->lock : protects most fields of binder_node.
28 * binder_node_lock() and binder_node_unlock() are
29 * used to acq/rel
30 * 3) proc->inner_lock : protects the thread and node lists
31 * (proc->threads, proc->waiting_threads, proc->nodes)
32 * and all todo lists associated with the binder_proc
33 * (proc->todo, thread->todo, proc->delivered_death and
34 * node->async_todo), as well as thread->transaction_stack
35 * binder_inner_proc_lock() and binder_inner_proc_unlock()
36 * are used to acq/rel
37 *
38 * Any lock under procA must never be nested under any lock at the same
39 * level or below on procB.
40 *
41 * Functions that require a lock to be held on entry indicate the
42 * required lock in the suffix of the function name:
43 *
44 * foo_olocked() : requires proc->outer_lock
45 * foo_nlocked() : requires node->lock
46 * foo_ilocked() : requires proc->inner_lock
47 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
48 * foo_nilocked(): requires node->lock and proc->inner_lock
49 * ...
50 */
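/*
 * Example (illustrative sketch only, using the lock helpers defined later
 * in this file): nesting the three lock levels in the documented order
 * for a single proc/node pair.
 *
 *	binder_proc_lock(proc);			1) proc->outer_lock
 *	binder_node_lock(node);			2) node->lock
 *	binder_inner_proc_lock(proc);		3) proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */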
51
52 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
53
54 #include <asm/cacheflush.h>
55 #include <linux/fdtable.h>
56 #include <linux/file.h>
57 #include <linux/freezer.h>
58 #include <linux/fs.h>
59 #include <linux/list.h>
60 #include <linux/miscdevice.h>
61 #include <linux/module.h>
62 #include <linux/mutex.h>
63 #include <linux/nsproxy.h>
64 #include <linux/poll.h>
65 #include <linux/debugfs.h>
66 #include <linux/rbtree.h>
67 #include <linux/sched.h>
68 #include <linux/seq_file.h>
69 #include <linux/uaccess.h>
70 #include <linux/pid_namespace.h>
71 #include <linux/security.h>
72 #include <linux/spinlock.h>
73
74 #ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
75 #define BINDER_IPC_32BIT 1
76 #endif
77
78 #include <uapi/linux/android/binder.h>
79 #include "binder_alloc.h"
80 #include "binder_trace.h"
81 #ifdef CONFIG_SAMSUNG_FREECESS
82 #include <linux/freecess.h>
83 #endif
84
85 static HLIST_HEAD(binder_deferred_list);
86 static DEFINE_MUTEX(binder_deferred_lock);
87
88 static HLIST_HEAD(binder_devices);
89 static HLIST_HEAD(binder_procs);
90 static DEFINE_MUTEX(binder_procs_lock);
91
92 static HLIST_HEAD(binder_dead_nodes);
93 static DEFINE_SPINLOCK(binder_dead_nodes_lock);
94
95 static struct dentry *binder_debugfs_dir_entry_root;
96 static struct dentry *binder_debugfs_dir_entry_proc;
97 static atomic_t binder_last_id;
98
99 #define BINDER_DEBUG_ENTRY(name) \
100 static int binder_##name##_open(struct inode *inode, struct file *file) \
101 { \
102 return single_open(file, binder_##name##_show, inode->i_private); \
103 } \
104 \
105 static const struct file_operations binder_##name##_fops = { \
106 .owner = THIS_MODULE, \
107 .open = binder_##name##_open, \
108 .read = seq_read, \
109 .llseek = seq_lseek, \
110 .release = single_release, \
111 }
112
113 static int binder_proc_show(struct seq_file *m, void *unused);
114 BINDER_DEBUG_ENTRY(proc);
115
116 /* This is only defined in include/asm-arm/sizes.h */
117 #ifndef SZ_1K
118 #define SZ_1K 0x400
119 #endif
120
121 #ifndef SZ_4M
122 #define SZ_4M 0x400000
123 #endif
124
125 #define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
126
127 #define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
128
129 enum {
130 BINDER_DEBUG_USER_ERROR = 1U << 0,
131 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
132 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
133 BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
134 BINDER_DEBUG_DEAD_BINDER = 1U << 4,
135 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
136 BINDER_DEBUG_READ_WRITE = 1U << 6,
137 BINDER_DEBUG_USER_REFS = 1U << 7,
138 BINDER_DEBUG_THREADS = 1U << 8,
139 BINDER_DEBUG_TRANSACTION = 1U << 9,
140 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
141 BINDER_DEBUG_FREE_BUFFER = 1U << 11,
142 BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
143 BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
144 BINDER_DEBUG_SPINLOCKS = 1U << 14,
145 };
146 static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
147 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
148 module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
149
150 static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
151 module_param_named(devices, binder_devices_param, charp, S_IRUGO);
152
153 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
154 static int binder_stop_on_user_error;
155
156 static int binder_set_stop_on_user_error(const char *val,
157 struct kernel_param *kp)
158 {
159 int ret;
160
161 ret = param_set_int(val, kp);
162 if (binder_stop_on_user_error < 2)
163 wake_up(&binder_user_error_wait);
164 return ret;
165 }
166 module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
167 param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
168
169 #define binder_debug(mask, x...) \
170 do { \
171 if (binder_debug_mask & mask) \
172 pr_info(x); \
173 } while (0)
174
175 #define binder_user_error(x...) \
176 do { \
177 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
178 pr_info(x); \
179 if (binder_stop_on_user_error) \
180 binder_stop_on_user_error = 2; \
181 } while (0)
182
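/*
 * Example (illustrative only; 'proc', 'thread' and 't' stand for the usual
 * local variables): a print gated on one of the mask bits above. The mask
 * can be changed at runtime through the debug_mask module parameter
 * (e.g. 0x240 enables BINDER_DEBUG_READ_WRITE and BINDER_DEBUG_TRANSACTION).
 *
 *	binder_debug(BINDER_DEBUG_TRANSACTION,
 *		     "%d:%d processing transaction %d\n",
 *		     proc->pid, thread->pid, t->debug_id);
 */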
183 #define to_flat_binder_object(hdr) \
184 container_of(hdr, struct flat_binder_object, hdr)
185
186 #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
187
188 #define to_binder_buffer_object(hdr) \
189 container_of(hdr, struct binder_buffer_object, hdr)
190
191 #define to_binder_fd_array_object(hdr) \
192 container_of(hdr, struct binder_fd_array_object, hdr)
193
194 enum binder_stat_types {
195 BINDER_STAT_PROC,
196 BINDER_STAT_THREAD,
197 BINDER_STAT_NODE,
198 BINDER_STAT_REF,
199 BINDER_STAT_DEATH,
200 BINDER_STAT_TRANSACTION,
201 BINDER_STAT_TRANSACTION_COMPLETE,
202 BINDER_STAT_COUNT
203 };
204
205 struct binder_stats {
206 atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
207 atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
208 atomic_t obj_created[BINDER_STAT_COUNT];
209 atomic_t obj_deleted[BINDER_STAT_COUNT];
210 };
211
212 static struct binder_stats binder_stats;
213
214 static inline void binder_stats_deleted(enum binder_stat_types type)
215 {
216 atomic_inc(&binder_stats.obj_deleted[type]);
217 }
218
219 static inline void binder_stats_created(enum binder_stat_types type)
220 {
221 atomic_inc(&binder_stats.obj_created[type]);
222 }
223
224 struct binder_transaction_log_entry {
225 int debug_id;
226 int debug_id_done;
227 int call_type;
228 int from_proc;
229 int from_thread;
230 int target_handle;
231 int to_proc;
232 int to_thread;
233 int to_node;
234 int data_size;
235 int offsets_size;
236 int return_error_line;
237 uint32_t return_error;
238 uint32_t return_error_param;
239 const char *context_name;
240 };
241 struct binder_transaction_log {
242 atomic_t cur;
243 bool full;
244 struct binder_transaction_log_entry entry[32];
245 };
246 static struct binder_transaction_log binder_transaction_log;
247 static struct binder_transaction_log binder_transaction_log_failed;
248
249 static struct binder_transaction_log_entry *binder_transaction_log_add(
250 struct binder_transaction_log *log)
251 {
252 struct binder_transaction_log_entry *e;
253 unsigned int cur = atomic_inc_return(&log->cur);
254
255 if (cur >= ARRAY_SIZE(log->entry))
256 log->full = 1;
257 e = &log->entry[cur % ARRAY_SIZE(log->entry)];
258 WRITE_ONCE(e->debug_id_done, 0);
259 /*
260 * write-barrier to synchronize access to e->debug_id_done.
261 * We make sure the initialized 0 value is seen before
262 * the other fields are zeroed by memset().
263 */
264 smp_wmb();
265 memset(e, 0, sizeof(*e));
266 return e;
267 }
268
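/*
 * Example (illustrative only, mirroring how the transaction path later in
 * this file uses the log): the caller fills in the returned entry and only
 * then publishes it by storing a non-zero debug_id_done, pairing with the
 * smp_wmb() above.
 *
 *	e = binder_transaction_log_add(&binder_transaction_log);
 *	e->debug_id = t_debug_id;
 *	e->from_proc = proc->pid;
 *	...
 *	smp_wmb();
 *	WRITE_ONCE(e->debug_id_done, t_debug_id);
 */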
269 struct binder_context {
270 struct binder_node *binder_context_mgr_node;
271 struct mutex context_mgr_node_lock;
272
273 kuid_t binder_context_mgr_uid;
274 const char *name;
275 };
276
277 struct binder_device {
278 struct hlist_node hlist;
279 struct miscdevice miscdev;
280 struct binder_context context;
281 };
282
283 /**
284 * struct binder_work - work enqueued on a worklist
285 * @entry: node enqueued on list
286 * @type: type of work to be performed
287 *
288 * There are separate work lists for proc, thread, and node (async).
289 */
290 struct binder_work {
291 struct list_head entry;
292
293 enum {
294 BINDER_WORK_TRANSACTION = 1,
295 BINDER_WORK_TRANSACTION_COMPLETE,
296 BINDER_WORK_RETURN_ERROR,
297 BINDER_WORK_NODE,
298 BINDER_WORK_DEAD_BINDER,
299 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
300 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
301 } type;
302 };
303
304 struct binder_error {
305 struct binder_work work;
306 uint32_t cmd;
307 };
308
309 /**
310 * struct binder_node - binder node bookkeeping
311 * @debug_id: unique ID for debugging
312 * (invariant after initialized)
313 * @lock: lock for node fields
314 * @work: worklist element for node work
315 * (protected by @proc->inner_lock)
316 * @rb_node: element for proc->nodes tree
317 * (protected by @proc->inner_lock)
318 * @dead_node: element for binder_dead_nodes list
319 * (protected by binder_dead_nodes_lock)
320 * @proc: binder_proc that owns this node
321 * (invariant after initialized)
322 * @refs: list of references on this node
323 * (protected by @lock)
324 * @internal_strong_refs: used to take strong references when
325 * initiating a transaction
326 * (protected by @proc->inner_lock if @proc
327 * and by @lock)
328 * @local_weak_refs: weak user refs from local process
329 * (protected by @proc->inner_lock if @proc
330 * and by @lock)
331 * @local_strong_refs: strong user refs from local process
332 * (protected by @proc->inner_lock if @proc
333 * and by @lock)
334 * @tmp_refs: temporary kernel refs
335 * (protected by @proc->inner_lock while @proc
336 * is valid, and by binder_dead_nodes_lock
337 * if @proc is NULL. During inc/dec and node release
338 * it is also protected by @lock to provide safety
339 * as the node dies and @proc becomes NULL)
340 * @ptr: userspace pointer for node
341 * (invariant, no lock needed)
342 * @cookie: userspace cookie for node
343 * (invariant, no lock needed)
344 * @has_strong_ref: userspace notified of strong ref
345 * (protected by @proc->inner_lock if @proc
346 * and by @lock)
347 * @pending_strong_ref: userspace has acked notification of strong ref
348 * (protected by @proc->inner_lock if @proc
349 * and by @lock)
350 * @has_weak_ref: userspace notified of weak ref
351 * (protected by @proc->inner_lock if @proc
352 * and by @lock)
353 * @pending_weak_ref: userspace has acked notification of weak ref
354 * (protected by @proc->inner_lock if @proc
355 * and by @lock)
356 * @has_async_transaction: async transaction to node in progress
357 * (protected by @lock)
358 * @sched_policy: minimum scheduling policy for node
359 * (invariant after initialized)
360 * @accept_fds: file descriptor operations supported for node
361 * (invariant after initialized)
362 * @min_priority: minimum scheduling priority
363 * (invariant after initialized)
364 * @txn_security_ctx: require sender's security context
365 * (invariant after initialized)
366 *
367 * @inherit_rt: inherit RT scheduling policy from caller
368 * (invariant after initialized)
369 * @async_todo: list of async work items
370 * (protected by @proc->inner_lock)
371 *
372 * Bookkeeping structure for binder nodes.
373 */
374 struct binder_node {
375 int debug_id;
376 spinlock_t lock;
377 struct binder_work work;
378 union {
379 struct rb_node rb_node;
380 struct hlist_node dead_node;
381 };
382 struct binder_proc *proc;
383 struct hlist_head refs;
384 int internal_strong_refs;
385 int local_weak_refs;
386 int local_strong_refs;
387 int tmp_refs;
388 binder_uintptr_t ptr;
389 binder_uintptr_t cookie;
390 struct {
391 /*
392 * bitfield elements protected by
393 * proc inner_lock
394 */
395 u8 has_strong_ref:1;
396 u8 pending_strong_ref:1;
397 u8 has_weak_ref:1;
398 u8 pending_weak_ref:1;
399 };
400 struct {
401 /*
402 * invariant after initialization
403 */
404 u8 sched_policy:2;
405 u8 inherit_rt:1;
406 u8 accept_fds:1;
407 u8 txn_security_ctx:1;
408 u8 min_priority;
409 };
410 bool has_async_transaction;
411 struct list_head async_todo;
412 };
413
414 struct binder_ref_death {
415 /**
416 * @work: worklist element for death notifications
417 * (protected by inner_lock of the proc that
418 * this ref belongs to)
419 */
420 struct binder_work work;
421 binder_uintptr_t cookie;
422 };
423
424 /**
425 * struct binder_ref_data - binder_ref counts and id
426 * @debug_id: unique ID for the ref
427 * @desc: unique userspace handle for ref
428 * @strong: strong ref count (debugging only if not locked)
429 * @weak: weak ref count (debugging only if not locked)
430 *
431 * Structure to hold ref count and ref id information. Since
432 * the actual ref can only be accessed with a lock, this structure
433 * is used to return information about the ref to callers of
434 * ref inc/dec functions.
435 */
436 struct binder_ref_data {
437 int debug_id;
438 uint32_t desc;
439 int strong;
440 int weak;
441 };
442
443 /**
444 * struct binder_ref - struct to track references on nodes
445 * @data: binder_ref_data containing id, handle, and current refcounts
446 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
447 * @rb_node_node: node for lookup by @node in proc's rb_tree
448 * @node_entry: list entry for node->refs list in target node
449 * (protected by @node->lock)
450 * @proc: binder_proc containing ref
451 * @node: binder_node of target node. When cleaning up a
452 * ref for deletion in binder_cleanup_ref, a non-NULL
453 * @node indicates the node must be freed
454 * @death: pointer to death notification (ref_death) if requested
455 * (protected by @node->lock)
456 *
457 * Structure to track references from procA to target node (on procB). This
458 * structure is unsafe to access without holding @proc->outer_lock.
459 */
460 struct binder_ref {
461 /* Lookups needed: */
462 /* node + proc => ref (transaction) */
463 /* desc + proc => ref (transaction, inc/dec ref) */
464 /* node => refs + procs (proc exit) */
465 struct binder_ref_data data;
466 struct rb_node rb_node_desc;
467 struct rb_node rb_node_node;
468 struct hlist_node node_entry;
469 struct binder_proc *proc;
470 struct binder_node *node;
471 struct binder_ref_death *death;
472 };
473
474 enum binder_deferred_state {
475 BINDER_DEFERRED_FLUSH = 0x01,
476 BINDER_DEFERRED_RELEASE = 0x02,
477 };
478
479 /**
480 * struct binder_priority - scheduler policy and priority
481 * @sched_policy: scheduler policy
482 * @prio: [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
483 *
484 * The binder driver supports inheriting the following scheduler policies:
485 * SCHED_NORMAL
486 * SCHED_BATCH
487 * SCHED_FIFO
488 * SCHED_RR
489 */
490 struct binder_priority {
491 unsigned int sched_policy;
492 int prio;
493 };
494
495 /**
496 * struct binder_proc - binder process bookkeeping
497 * @proc_node: element for binder_procs list
498 * @threads: rbtree of binder_threads in this proc
499 * (protected by @inner_lock)
500 * @nodes: rbtree of binder nodes associated with
501 * this proc ordered by node->ptr
502 * (protected by @inner_lock)
503 * @refs_by_desc: rbtree of refs ordered by ref->desc
504 * (protected by @outer_lock)
505 * @refs_by_node: rbtree of refs ordered by ref->node
506 * (protected by @outer_lock)
507 * @waiting_threads: threads currently waiting for proc work
508 * (protected by @inner_lock)
509 * @pid: PID of group_leader of process
510 * (invariant after initialized)
511 * @tsk: task_struct for group_leader of process
512 * (invariant after initialized)
513 * @deferred_work_node: element for binder_deferred_list
514 * (protected by binder_deferred_lock)
515 * @deferred_work: bitmap of deferred work to perform
516 * (protected by binder_deferred_lock)
517 * @is_dead: process is dead and awaiting free
518 * when outstanding transactions are cleaned up
519 * (protected by @inner_lock)
520 * @todo: list of work for this process
521 * (protected by @inner_lock)
522 * @stats: per-process binder statistics
523 * (atomics, no lock needed)
524 * @delivered_death: list of delivered death notifications
525 * (protected by @inner_lock)
526 * @max_threads: cap on number of binder threads
527 * (protected by @inner_lock)
528 * @requested_threads: number of binder threads requested but not
529 * yet started. In current implementation, can
530 * only be 0 or 1.
531 * (protected by @inner_lock)
532 * @requested_threads_started: number of binder threads started
533 * (protected by @inner_lock)
534 * @tmp_ref: temporary reference to indicate proc is in use
535 * (protected by @inner_lock)
536 * @default_priority: default scheduler priority
537 * (invariant after initialized)
538 * @debugfs_entry: debugfs node
539 * @alloc: binder allocator bookkeeping
540 * @context: binder_context for this proc
541 * (invariant after initialized)
542 * @inner_lock: can nest under outer_lock and/or node lock
543 * @outer_lock: no nesting under inner or node lock
544 * Lock order: 1) outer, 2) node, 3) inner
545 *
546 * Bookkeeping structure for binder processes
547 */
548 struct binder_proc {
549 struct hlist_node proc_node;
550 struct rb_root threads;
551 struct rb_root nodes;
552 struct rb_root refs_by_desc;
553 struct rb_root refs_by_node;
554 struct list_head waiting_threads;
555 int pid;
556 struct task_struct *tsk;
557 struct hlist_node deferred_work_node;
558 int deferred_work;
559 bool is_dead;
560
561 struct list_head todo;
562 struct binder_stats stats;
563 struct list_head delivered_death;
564 int max_threads;
565 int requested_threads;
566 int requested_threads_started;
567 int tmp_ref;
568 struct binder_priority default_priority;
569 struct dentry *debugfs_entry;
570 struct binder_alloc alloc;
571 struct binder_context *context;
572 spinlock_t inner_lock;
573 spinlock_t outer_lock;
574 };
575
576 enum {
577 BINDER_LOOPER_STATE_REGISTERED = 0x01,
578 BINDER_LOOPER_STATE_ENTERED = 0x02,
579 BINDER_LOOPER_STATE_EXITED = 0x04,
580 BINDER_LOOPER_STATE_INVALID = 0x08,
581 BINDER_LOOPER_STATE_WAITING = 0x10,
582 BINDER_LOOPER_STATE_POLL = 0x20,
583 };
584
585 /**
586 * struct binder_thread - binder thread bookkeeping
587 * @proc: binder process for this thread
588 * (invariant after initialization)
589 * @rb_node: element for proc->threads rbtree
590 * (protected by @proc->inner_lock)
591 * @waiting_thread_node: element for @proc->waiting_threads list
592 * (protected by @proc->inner_lock)
593 * @pid: PID for this thread
594 * (invariant after initialization)
595 * @looper: bitmap of looping state
596 * (only accessed by this thread)
597 * @looper_need_return: looping thread needs to exit driver
598 * (no lock needed)
599 * @transaction_stack: stack of in-progress transactions for this thread
600 * (protected by @proc->inner_lock)
601 * @todo: list of work to do for this thread
602 * (protected by @proc->inner_lock)
603 * @process_todo: whether work in @todo should be processed
604 * (protected by @proc->inner_lock)
605 * @return_error: transaction errors reported by this thread
606 * (only accessed by this thread)
607 * @reply_error: transaction errors reported by target thread
608 * (protected by @proc->inner_lock)
609 * @wait: wait queue for thread work
610 * @stats: per-thread statistics
611 * (atomics, no lock needed)
612 * @tmp_ref: temporary reference to indicate thread is in use
613 * (atomic since @proc->inner_lock cannot
614 * always be acquired)
615 * @is_dead: thread is dead and awaiting free
616 * when outstanding transactions are cleaned up
617 * (protected by @proc->inner_lock)
618 * @task: struct task_struct for this thread
619 *
620 * Bookkeeping structure for binder threads.
621 */
622 struct binder_thread {
623 struct binder_proc *proc;
624 struct rb_node rb_node;
625 struct list_head waiting_thread_node;
626 int pid;
627 int looper; /* only modified by this thread */
628 bool looper_need_return; /* can be written by other thread */
629 struct binder_transaction *transaction_stack;
630 struct list_head todo;
631 bool process_todo;
632 struct binder_error return_error;
633 struct binder_error reply_error;
634 wait_queue_head_t wait;
635 struct binder_stats stats;
636 atomic_t tmp_ref;
637 bool is_dead;
638 struct task_struct *task;
639 };
640
641 struct binder_transaction {
642 int debug_id;
643 struct binder_work work;
644 struct binder_thread *from;
645 struct binder_transaction *from_parent;
646 struct binder_proc *to_proc;
647 struct binder_thread *to_thread;
648 struct binder_transaction *to_parent;
649 unsigned need_reply:1;
650 /* unsigned is_dead:1; */ /* not used at the moment */
651
652 struct binder_buffer *buffer;
653 unsigned int code;
654 unsigned int flags;
655 struct binder_priority priority;
656 struct binder_priority saved_priority;
657 bool set_priority_called;
658 kuid_t sender_euid;
659 binder_uintptr_t security_ctx;
660 /**
661 * @lock: protects @from, @to_proc, and @to_thread
662 *
663 * @from, @to_proc, and @to_thread can be set to NULL
664 * during thread teardown
665 */
666 spinlock_t lock;
667 };
668
669 /**
670 * binder_proc_lock() - Acquire outer lock for given binder_proc
671 * @proc: struct binder_proc to acquire
672 *
673 * Acquires proc->outer_lock. Used to protect binder_ref
674 * structures associated with the given proc.
675 */
676 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
677 static void
678 _binder_proc_lock(struct binder_proc *proc, int line)
679 {
680 binder_debug(BINDER_DEBUG_SPINLOCKS,
681 "%s: line=%d\n", __func__, line);
682 spin_lock(&proc->outer_lock);
683 }
684
685 /**
686 * binder_proc_unlock() - Release spinlock for given binder_proc
687 * @proc: struct binder_proc to release
688 *
689 * Release lock acquired via binder_proc_lock()
690 */
691 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
692 static void
693 _binder_proc_unlock(struct binder_proc *proc, int line)
694 {
695 binder_debug(BINDER_DEBUG_SPINLOCKS,
696 "%s: line=%d\n", __func__, line);
697 spin_unlock(&proc->outer_lock);
698 }
699
700 /**
701 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
702 * @proc: struct binder_proc to acquire
703 *
704 * Acquires proc->inner_lock. Used to protect todo lists
705 */
706 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
707 static void
708 _binder_inner_proc_lock(struct binder_proc *proc, int line)
709 {
710 binder_debug(BINDER_DEBUG_SPINLOCKS,
711 "%s: line=%d\n", __func__, line);
712 spin_lock(&proc->inner_lock);
713 }
714
715 /**
716 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
717 * @proc: struct binder_proc to release
718 *
719 * Release lock acquired via binder_inner_proc_lock()
720 */
721 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
722 static void
723 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
724 {
725 binder_debug(BINDER_DEBUG_SPINLOCKS,
726 "%s: line=%d\n", __func__, line);
727 spin_unlock(&proc->inner_lock);
728 }
729
730 /**
731 * binder_node_lock() - Acquire spinlock for given binder_node
732 * @node: struct binder_node to acquire
733 *
734 * Acquires node->lock. Used to protect binder_node fields
735 */
736 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
737 static void
738 _binder_node_lock(struct binder_node *node, int line)
739 {
740 binder_debug(BINDER_DEBUG_SPINLOCKS,
741 "%s: line=%d\n", __func__, line);
742 spin_lock(&node->lock);
743 }
744
745 /**
746 * binder_node_unlock() - Release spinlock for given binder_node
747 * @node: struct binder_node to release
748 *
749 * Release lock acquired via binder_node_lock()
750 */
751 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
752 static void
753 _binder_node_unlock(struct binder_node *node, int line)
754 {
755 binder_debug(BINDER_DEBUG_SPINLOCKS,
756 "%s: line=%d\n", __func__, line);
757 spin_unlock(&node->lock);
758 }
759
760 /**
761 * binder_node_inner_lock() - Acquire node and inner locks
762 * @node: struct binder_node to acquire
763 *
764 * Acquires node->lock. If node->proc is non-NULL, also acquires
765 * proc->inner_lock. Used to protect binder_node fields
766 */
767 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
768 static void
769 _binder_node_inner_lock(struct binder_node *node, int line)
770 {
771 binder_debug(BINDER_DEBUG_SPINLOCKS,
772 "%s: line=%d\n", __func__, line);
773 spin_lock(&node->lock);
774 if (node->proc)
775 binder_inner_proc_lock(node->proc);
776 }
777
778 /**
779 * binder_node_inner_unlock() - Release node and inner locks
780 * @node: struct binder_node to release
781 *
782 * Release locks acquired via binder_node_inner_lock()
783 */
784 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
785 static void
786 _binder_node_inner_unlock(struct binder_node *node, int line)
787 {
788 struct binder_proc *proc = node->proc;
789
790 binder_debug(BINDER_DEBUG_SPINLOCKS,
791 "%s: line=%d\n", __func__, line);
792 if (proc)
793 binder_inner_proc_unlock(proc);
794 spin_unlock(&node->lock);
795 }
796
797 static bool binder_worklist_empty_ilocked(struct list_head *list)
798 {
799 return list_empty(list);
800 }
801
802 /**
803 * binder_worklist_empty() - Check if no items on the work list
804 * @proc: binder_proc associated with list
805 * @list: list to check
806 *
807 * Return: true if there are no items on list, else false
808 */
809 static bool binder_worklist_empty(struct binder_proc *proc,
810 struct list_head *list)
811 {
812 bool ret;
813
814 binder_inner_proc_lock(proc);
815 ret = binder_worklist_empty_ilocked(list);
816 binder_inner_proc_unlock(proc);
817 return ret;
818 }
819
820 /**
821 * binder_enqueue_work_ilocked() - Add an item to the work list
822 * @work: struct binder_work to add to list
823 * @target_list: list to add work to
824 *
825 * Adds the work to the specified list. Asserts that work
826 * is not already on a list.
827 *
828 * Requires the proc->inner_lock to be held.
829 */
830 static void
831 binder_enqueue_work_ilocked(struct binder_work *work,
832 struct list_head *target_list)
833 {
834 BUG_ON(target_list == NULL);
835 BUG_ON(work->entry.next && !list_empty(&work->entry));
836 list_add_tail(&work->entry, target_list);
837 }
838
839 /**
840 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
841 * @thread: thread to queue work to
842 * @work: struct binder_work to add to list
843 *
844 * Adds the work to the todo list of the thread. Doesn't set the process_todo
845 * flag, which means that (if it wasn't already set) the thread will go to
846 * sleep without handling this work when it calls read.
847 *
848 * Requires the proc->inner_lock to be held.
849 */
850 static void
851 binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
852 struct binder_work *work)
853 {
854 binder_enqueue_work_ilocked(work, &thread->todo);
855 }
856
857 /**
858 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
859 * @thread: thread to queue work to
860 * @work: struct binder_work to add to list
861 *
862 * Adds the work to the todo list of the thread, and enables processing
863 * of the todo queue.
864 *
865 * Requires the proc->inner_lock to be held.
866 */
867 static void
868 binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
869 struct binder_work *work)
870 {
871 binder_enqueue_work_ilocked(work, &thread->todo);
872 thread->process_todo = true;
873 }
874
875 /**
876 * binder_enqueue_thread_work() - Add an item to the thread work list
877 * @thread: thread to queue work to
878 * @work: struct binder_work to add to list
879 *
880 * Adds the work to the todo list of the thread, and enables processing
881 * of the todo queue.
882 */
883 static void
884 binder_enqueue_thread_work(struct binder_thread *thread,
885 struct binder_work *work)
886 {
887 binder_inner_proc_lock(thread->proc);
888 binder_enqueue_thread_work_ilocked(thread, work);
889 binder_inner_proc_unlock(thread->proc);
890 }
891
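/*
 * Example (illustrative only; 'thread', 't' and 'urgent' are placeholders):
 * queueing work to a thread under the inner lock. The normal enqueue also
 * marks the thread's todo list as ready to process, while the deferred
 * variant only adds the work, so the thread is not forced to handle it on
 * its next read.
 *
 *	binder_inner_proc_lock(thread->proc);
 *	if (urgent)
 *		binder_enqueue_thread_work_ilocked(thread, &t->work);
 *	else
 *		binder_enqueue_deferred_thread_work_ilocked(thread, &t->work);
 *	binder_inner_proc_unlock(thread->proc);
 */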
892 static void
893 binder_dequeue_work_ilocked(struct binder_work *work)
894 {
895 list_del_init(&work->entry);
896 }
897
898 /**
899 * binder_dequeue_work() - Removes an item from the work list
900 * @proc: binder_proc associated with list
901 * @work: struct binder_work to remove from list
902 *
903 * Removes the specified work item from whatever list it is on.
904 * Can safely be called if work is not on any list.
905 */
906 static void
907 binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
908 {
909 binder_inner_proc_lock(proc);
910 binder_dequeue_work_ilocked(work);
911 binder_inner_proc_unlock(proc);
912 }
913
914 static struct binder_work *binder_dequeue_work_head_ilocked(
915 struct list_head *list)
916 {
917 struct binder_work *w;
918
919 w = list_first_entry_or_null(list, struct binder_work, entry);
920 if (w)
921 list_del_init(&w->entry);
922 return w;
923 }
924
925 /**
926 * binder_dequeue_work_head() - Dequeues the item at head of list
927 * @proc: binder_proc associated with list
928 * @list: list to dequeue head
929 *
930 * Removes the head of the list if there are items on the list
931 *
932 * Return: pointer to dequeued binder_work, or NULL if list was empty
933 */
934 static struct binder_work *binder_dequeue_work_head(
935 struct binder_proc *proc,
936 struct list_head *list)
937 {
938 struct binder_work *w;
939
940 binder_inner_proc_lock(proc);
941 w = binder_dequeue_work_head_ilocked(list);
942 binder_inner_proc_unlock(proc);
943 return w;
944 }
945
946 static void
947 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
948 static void binder_free_thread(struct binder_thread *thread);
949 static void binder_free_proc(struct binder_proc *proc);
950 static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
951
952 struct files_struct *binder_get_files_struct(struct binder_proc *proc)
953 {
954 return get_files_struct(proc->tsk);
955 }
956
957 static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
958 {
959 struct files_struct *files;
960 unsigned long rlim_cur;
961 unsigned long irqs;
962 int ret;
963
964 files = binder_get_files_struct(proc);
965 if (files == NULL)
966 return -ESRCH;
967
968 if (!lock_task_sighand(proc->tsk, &irqs)) {
969 ret = -EMFILE;
970 goto err;
971 }
972
973 rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
974 unlock_task_sighand(proc->tsk, &irqs);
975
976 ret = __alloc_fd(files, 0, rlim_cur, flags);
977 err:
978 put_files_struct(files);
979 return ret;
980 }
981
982 /*
983 * copied from fd_install
984 */
985 static void task_fd_install(
986 struct binder_proc *proc, unsigned int fd, struct file *file)
987 {
988 struct files_struct *files = binder_get_files_struct(proc);
989
990 if (files) {
991 __fd_install(files, fd, file);
992 put_files_struct(files);
993 }
994 }
995
996 /*
997 * copied from sys_close
998 */
999 static long task_close_fd(struct binder_proc *proc, unsigned int fd)
1000 {
1001 struct files_struct *files = binder_get_files_struct(proc);
1002 int retval;
1003
1004 if (files == NULL)
1005 return -ESRCH;
1006
1007 retval = __close_fd(files, fd);
1008 /* can't restart close syscall because file table entry was cleared */
1009 if (unlikely(retval == -ERESTARTSYS ||
1010 retval == -ERESTARTNOINTR ||
1011 retval == -ERESTARTNOHAND ||
1012 retval == -ERESTART_RESTARTBLOCK))
1013 retval = -EINTR;
1014 put_files_struct(files);
1015
1016 return retval;
1017 }
1018
1019 static bool binder_has_work_ilocked(struct binder_thread *thread,
1020 bool do_proc_work)
1021 {
1022 return thread->process_todo ||
1023 thread->looper_need_return ||
1024 (do_proc_work &&
1025 !binder_worklist_empty_ilocked(&thread->proc->todo));
1026 }
1027
1028 static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
1029 {
1030 bool has_work;
1031
1032 binder_inner_proc_lock(thread->proc);
1033 has_work = binder_has_work_ilocked(thread, do_proc_work);
1034 binder_inner_proc_unlock(thread->proc);
1035
1036 return has_work;
1037 }
1038
1039 static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
1040 {
1041 return !thread->transaction_stack &&
1042 binder_worklist_empty_ilocked(&thread->todo) &&
1043 (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
1044 BINDER_LOOPER_STATE_REGISTERED));
1045 }
1046
1047 static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
1048 bool sync)
1049 {
1050 struct rb_node *n;
1051 struct binder_thread *thread;
1052
1053 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
1054 thread = rb_entry(n, struct binder_thread, rb_node);
1055 if (thread->looper & BINDER_LOOPER_STATE_POLL &&
1056 binder_available_for_proc_work_ilocked(thread)) {
1057 if (sync)
1058 wake_up_interruptible_sync(&thread->wait);
1059 else
1060 wake_up_interruptible(&thread->wait);
1061 }
1062 }
1063 }
1064
1065 /**
1066 * binder_select_thread_ilocked() - selects a thread for doing proc work.
1067 * @proc: process to select a thread from
1068 *
1069 * Note that calling this function moves the thread off the waiting_threads
1070 * list, so it can only be woken up by the caller of this function, or a
1071 * signal. Therefore, callers *should* always wake up the thread this function
1072 * returns.
1073 *
1074 * Return: If there's a thread currently waiting for process work,
1075 * returns that thread. Otherwise returns NULL.
1076 */
1077 static struct binder_thread *
1078 binder_select_thread_ilocked(struct binder_proc *proc)
1079 {
1080 struct binder_thread *thread;
1081
1082 assert_spin_locked(&proc->inner_lock);
1083 thread = list_first_entry_or_null(&proc->waiting_threads,
1084 struct binder_thread,
1085 waiting_thread_node);
1086
1087 if (thread)
1088 list_del_init(&thread->waiting_thread_node);
1089
1090 return thread;
1091 }
1092
1093 /**
1094 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
1095 * @proc: process to wake up a thread in
1096 * @thread: specific thread to wake-up (may be NULL)
1097 * @sync: whether to do a synchronous wake-up
1098 *
1099 * This function wakes up a thread in the @proc process.
1100 * The caller may provide a specific thread to wake-up in
1101 * the @thread parameter. If @thread is NULL, this function
1102 * will wake up threads that have called poll().
1103 *
1104 * Note that for this function to work as expected, callers
1105 * should first call binder_select_thread() to find a thread
1106 * to handle the work (if they don't have a thread already),
1107 * and pass the result into the @thread parameter.
1108 */
1109 static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
1110 struct binder_thread *thread,
1111 bool sync)
1112 {
1113 assert_spin_locked(&proc->inner_lock);
1114
1115 if (thread) {
1116 if (sync)
1117 wake_up_interruptible_sync(&thread->wait);
1118 else
1119 wake_up_interruptible(&thread->wait);
1120 return;
1121 }
1122
1123 /* Didn't find a thread waiting for proc work; this can happen
1124 * in two scenarios:
1125 * 1. All threads are busy handling transactions
1126 * In that case, one of those threads should call back into
1127 * the kernel driver soon and pick up this work.
1128 * 2. Threads are using the (e)poll interface, in which case
1129 * they may be blocked on the waitqueue without having been
1130 * added to waiting_threads. For this case, we just iterate
1131 * over all threads not handling transaction work, and
1132 * wake them all up. We wake all because we don't know whether
1133 * a thread that called into (e)poll is handling non-binder
1134 * work currently.
1135 */
1136 binder_wakeup_poll_threads_ilocked(proc, sync);
1137 }
1138
1139 static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
1140 {
1141 struct binder_thread *thread = binder_select_thread_ilocked(proc);
1142
1143 binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
1144 }
1145
1146 static bool is_rt_policy(int policy)
1147 {
1148 return policy == SCHED_FIFO || policy == SCHED_RR;
1149 }
1150
1151 static bool is_fair_policy(int policy)
1152 {
1153 return policy == SCHED_NORMAL || policy == SCHED_BATCH;
1154 }
1155
1156 static bool binder_supported_policy(int policy)
1157 {
1158 return is_fair_policy(policy) || is_rt_policy(policy);
1159 }
1160
1161 static int to_userspace_prio(int policy, int kernel_priority)
1162 {
1163 if (is_fair_policy(policy))
1164 return PRIO_TO_NICE(kernel_priority);
1165 else
1166 return MAX_USER_RT_PRIO - 1 - kernel_priority;
1167 }
1168
1169 static int to_kernel_prio(int policy, int user_priority)
1170 {
1171 if (is_fair_policy(policy))
1172 return NICE_TO_PRIO(user_priority);
1173 else
1174 return MAX_USER_RT_PRIO - 1 - user_priority;
1175 }
1176
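/*
 * Worked examples of the mapping above (values follow the kernel's
 * NICE_TO_PRIO()/MAX_USER_RT_PRIO definitions):
 *
 *	to_kernel_prio(SCHED_NORMAL, 0)   == 120	default nice
 *	to_kernel_prio(SCHED_NORMAL, -20) == 100
 *	to_kernel_prio(SCHED_NORMAL, 19)  == 139
 *	to_kernel_prio(SCHED_FIFO, 99)    == 0		highest RT priority
 *	to_kernel_prio(SCHED_FIFO, 1)     == 98
 */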
1177 static void binder_do_set_priority(struct task_struct *task,
1178 struct binder_priority desired,
1179 bool verify)
1180 {
1181 int priority; /* user-space prio value */
1182 bool has_cap_nice;
1183 unsigned int policy = desired.sched_policy;
1184
1185 if (task->policy == policy && task->normal_prio == desired.prio)
1186 return;
1187
1188 has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);
1189
1190 priority = to_userspace_prio(policy, desired.prio);
1191
1192 if (verify && is_rt_policy(policy) && !has_cap_nice) {
1193 long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);
1194
1195 if (max_rtprio == 0) {
1196 policy = SCHED_NORMAL;
1197 priority = MIN_NICE;
1198 } else if (priority > max_rtprio) {
1199 priority = max_rtprio;
1200 }
1201 }
1202
1203 if (verify && is_fair_policy(policy) && !has_cap_nice) {
1204 long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));
1205
1206 if (min_nice > MAX_NICE) {
1207 binder_user_error("%d RLIMIT_NICE not set\n",
1208 task->pid);
1209 return;
1210 } else if (priority < min_nice) {
1211 priority = min_nice;
1212 }
1213 }
1214
1215 if (policy != desired.sched_policy ||
1216 to_kernel_prio(policy, priority) != desired.prio)
1217 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
1218 "%d: priority %d not allowed, using %d instead\n",
1219 task->pid, desired.prio,
1220 to_kernel_prio(policy, priority));
1221
1222 trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
1223 to_kernel_prio(policy, priority),
1224 desired.prio);
1225
1226 /* Set the actual priority */
1227 if (task->policy != policy || is_rt_policy(policy)) {
1228 struct sched_param params;
1229
1230 params.sched_priority = is_rt_policy(policy) ? priority : 0;
1231
1232 sched_setscheduler_nocheck(task,
1233 policy | SCHED_RESET_ON_FORK,
1234 &params);
1235 }
1236 if (is_fair_policy(policy))
1237 set_user_nice(task, priority);
1238 }
1239
1240 static void binder_set_priority(struct task_struct *task,
1241 struct binder_priority desired)
1242 {
1243 binder_do_set_priority(task, desired, /* verify = */ true);
1244 }
1245
1246 static void binder_restore_priority(struct task_struct *task,
1247 struct binder_priority desired)
1248 {
1249 binder_do_set_priority(task, desired, /* verify = */ false);
1250 }
1251
1252 static void binder_transaction_priority(struct task_struct *task,
1253 struct binder_transaction *t,
1254 struct binder_priority node_prio,
1255 bool inherit_rt)
1256 {
1257 struct binder_priority desired_prio = t->priority;
1258
1259 if (t->set_priority_called)
1260 return;
1261
1262 t->set_priority_called = true;
1263 t->saved_priority.sched_policy = task->policy;
1264 t->saved_priority.prio = task->normal_prio;
1265
1266 if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
1267 desired_prio.prio = NICE_TO_PRIO(0);
1268 desired_prio.sched_policy = SCHED_NORMAL;
1269 }
1270
1271 if (node_prio.prio < t->priority.prio ||
1272 (node_prio.prio == t->priority.prio &&
1273 node_prio.sched_policy == SCHED_FIFO)) {
1274 /*
1275 * In case the minimum priority on the node is
1276 * higher (lower value), use that priority. If
1277 * the priority is the same, but the node uses
1278 * SCHED_FIFO, prefer SCHED_FIFO, since it can
1279 * run unbounded, unlike SCHED_RR.
1280 */
1281 desired_prio = node_prio;
1282 }
1283
1284 binder_set_priority(task, desired_prio);
1285 }
1286
1287 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
1288 binder_uintptr_t ptr)
1289 {
1290 struct rb_node *n = proc->nodes.rb_node;
1291 struct binder_node *node;
1292
1293 assert_spin_locked(&proc->inner_lock);
1294
1295 while (n) {
1296 node = rb_entry(n, struct binder_node, rb_node);
1297
1298 if (ptr < node->ptr)
1299 n = n->rb_left;
1300 else if (ptr > node->ptr)
1301 n = n->rb_right;
1302 else {
1303 /*
1304 * take an implicit weak reference
1305 * to ensure node stays alive until
1306 * call to binder_put_node()
1307 */
1308 binder_inc_node_tmpref_ilocked(node);
1309 return node;
1310 }
1311 }
1312 return NULL;
1313 }
1314
1315 static struct binder_node *binder_get_node(struct binder_proc *proc,
1316 binder_uintptr_t ptr)
1317 {
1318 struct binder_node *node;
1319
1320 binder_inner_proc_lock(proc);
1321 node = binder_get_node_ilocked(proc, ptr);
1322 binder_inner_proc_unlock(proc);
1323 return node;
1324 }
1325
1326 static struct binder_node *binder_init_node_ilocked(
1327 struct binder_proc *proc,
1328 struct binder_node *new_node,
1329 struct flat_binder_object *fp)
1330 {
1331 struct rb_node **p = &proc->nodes.rb_node;
1332 struct rb_node *parent = NULL;
1333 struct binder_node *node;
1334 binder_uintptr_t ptr = fp ? fp->binder : 0;
1335 binder_uintptr_t cookie = fp ? fp->cookie : 0;
1336 __u32 flags = fp ? fp->flags : 0;
1337 s8 priority;
1338
1339 assert_spin_locked(&proc->inner_lock);
1340
1341 while (*p) {
1342
1343 parent = *p;
1344 node = rb_entry(parent, struct binder_node, rb_node);
1345
1346 if (ptr < node->ptr)
1347 p = &(*p)->rb_left;
1348 else if (ptr > node->ptr)
1349 p = &(*p)->rb_right;
1350 else {
1351 /*
1352 * A matching node is already in
1353 * the rb tree. Abandon the init
1354 * and return it.
1355 */
1356 binder_inc_node_tmpref_ilocked(node);
1357 return node;
1358 }
1359 }
1360 node = new_node;
1361 binder_stats_created(BINDER_STAT_NODE);
1362 node->tmp_refs++;
1363 rb_link_node(&node->rb_node, parent, p);
1364 rb_insert_color(&node->rb_node, &proc->nodes);
1365 node->debug_id = atomic_inc_return(&binder_last_id);
1366 node->proc = proc;
1367 node->ptr = ptr;
1368 node->cookie = cookie;
1369 node->work.type = BINDER_WORK_NODE;
1370 priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1371 node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
1372 FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
1373 node->min_priority = to_kernel_prio(node->sched_policy, priority);
1374 node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1375 node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
1376 node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
1377 spin_lock_init(&node->lock);
1378 INIT_LIST_HEAD(&node->work.entry);
1379 INIT_LIST_HEAD(&node->async_todo);
1380 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1381 "%d:%d node %d u%016llx c%016llx created\n",
1382 proc->pid, current->pid, node->debug_id,
1383 (u64)node->ptr, (u64)node->cookie);
1384
1385 return node;
1386 }
1387
1388 static struct binder_node *binder_new_node(struct binder_proc *proc,
1389 struct flat_binder_object *fp)
1390 {
1391 struct binder_node *node;
1392 struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
1393
1394 if (!new_node)
1395 return NULL;
1396 binder_inner_proc_lock(proc);
1397 node = binder_init_node_ilocked(proc, new_node, fp);
1398 binder_inner_proc_unlock(proc);
1399 if (node != new_node)
1400 /*
1401 * The node was already added by another thread
1402 */
1403 kfree(new_node);
1404
1405 return node;
1406 }
1407
1408 static void binder_free_node(struct binder_node *node)
1409 {
1410 kfree(node);
1411 binder_stats_deleted(BINDER_STAT_NODE);
1412 }
1413
1414 static int binder_inc_node_nilocked(struct binder_node *node, int strong,
1415 int internal,
1416 struct list_head *target_list)
1417 {
1418 struct binder_proc *proc = node->proc;
1419
1420 assert_spin_locked(&node->lock);
1421 if (proc)
1422 assert_spin_locked(&proc->inner_lock);
1423 if (strong) {
1424 if (internal) {
1425 if (target_list == NULL &&
1426 node->internal_strong_refs == 0 &&
1427 !(node->proc &&
1428 node == node->proc->context->
1429 binder_context_mgr_node &&
1430 node->has_strong_ref)) {
1431 pr_err("invalid inc strong node for %d\n",
1432 node->debug_id);
1433 return -EINVAL;
1434 }
1435 node->internal_strong_refs++;
1436 } else
1437 node->local_strong_refs++;
1438 if (!node->has_strong_ref && target_list) {
1439 binder_dequeue_work_ilocked(&node->work);
1440 /*
1441 * Note: this function is the only place where we queue
1442 * directly to a thread->todo without using the
1443 * corresponding binder_enqueue_thread_work() helper
1444 * functions; in this case it's ok to not set the
1445 * process_todo flag, since we know this node work will
1446 * always be followed by other work that starts queue
1447 * processing: in case of synchronous transactions, a
1448 * BR_REPLY or BR_ERROR; in case of oneway
1449 * transactions, a BR_TRANSACTION_COMPLETE.
1450 */
1451 binder_enqueue_work_ilocked(&node->work, target_list);
1452 }
1453 } else {
1454 if (!internal)
1455 node->local_weak_refs++;
1456 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
1457 if (target_list == NULL) {
1458 pr_err("invalid inc weak node for %d\n",
1459 node->debug_id);
1460 return -EINVAL;
1461 }
1462 /*
1463 * See comment above
1464 */
1465 binder_enqueue_work_ilocked(&node->work, target_list);
1466 }
1467 }
1468 return 0;
1469 }
1470
1471 static int binder_inc_node(struct binder_node *node, int strong, int internal,
1472 struct list_head *target_list)
1473 {
1474 int ret;
1475
1476 binder_node_inner_lock(node);
1477 ret = binder_inc_node_nilocked(node, strong, internal, target_list);
1478 binder_node_inner_unlock(node);
1479
1480 return ret;
1481 }
1482
1483 static bool binder_dec_node_nilocked(struct binder_node *node,
1484 int strong, int internal)
1485 {
1486 struct binder_proc *proc = node->proc;
1487
1488 assert_spin_locked(&node->lock);
1489 if (proc)
1490 assert_spin_locked(&proc->inner_lock);
1491 if (strong) {
1492 if (internal)
1493 node->internal_strong_refs--;
1494 else
1495 node->local_strong_refs--;
1496 if (node->local_strong_refs || node->internal_strong_refs)
1497 return false;
1498 } else {
1499 if (!internal)
1500 node->local_weak_refs--;
1501 if (node->local_weak_refs || node->tmp_refs ||
1502 !hlist_empty(&node->refs))
1503 return false;
1504 }
1505
1506 if (proc && (node->has_strong_ref || node->has_weak_ref)) {
1507 if (list_empty(&node->work.entry)) {
1508 binder_enqueue_work_ilocked(&node->work, &proc->todo);
1509 binder_wakeup_proc_ilocked(proc);
1510 }
1511 } else {
1512 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
1513 !node->local_weak_refs && !node->tmp_refs) {
1514 if (proc) {
1515 binder_dequeue_work_ilocked(&node->work);
1516 rb_erase(&node->rb_node, &proc->nodes);
1517 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1518 "refless node %d deleted\n",
1519 node->debug_id);
1520 } else {
1521 BUG_ON(!list_empty(&node->work.entry));
1522 spin_lock(&binder_dead_nodes_lock);
1523 /*
1524 * tmp_refs could have changed so
1525 * check it again
1526 */
1527 if (node->tmp_refs) {
1528 spin_unlock(&binder_dead_nodes_lock);
1529 return false;
1530 }
1531 hlist_del(&node->dead_node);
1532 spin_unlock(&binder_dead_nodes_lock);
1533 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1534 "dead node %d deleted\n",
1535 node->debug_id);
1536 }
1537 return true;
1538 }
1539 }
1540 return false;
1541 }
1542
1543 static void binder_dec_node(struct binder_node *node, int strong, int internal)
1544 {
1545 bool free_node;
1546
1547 binder_node_inner_lock(node);
1548 free_node = binder_dec_node_nilocked(node, strong, internal);
1549 binder_node_inner_unlock(node);
1550 if (free_node)
1551 binder_free_node(node);
1552 }
1553
1554 static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
1555 {
1556 /*
1557 * No call to binder_inc_node() is needed since we
1558 * don't need to inform userspace of any changes to
1559 * tmp_refs
1560 */
1561 node->tmp_refs++;
1562 }
1563
1564 /**
1565 * binder_inc_node_tmpref() - take a temporary reference on node
1566 * @node: node to reference
1567 *
1568 * Take reference on node to prevent the node from being freed
1569 * while referenced only by a local variable. The inner lock is
1570 * needed to serialize with the node work on the queue (which
1571 * isn't needed after the node is dead). If the node is dead
1572 * (node->proc is NULL), use binder_dead_nodes_lock to protect
1573 * node->tmp_refs against dead-node-only cases where the node
1574 * lock cannot be acquired (eg traversing the dead node list to
1575 * print nodes)
1576 */
1577 static void binder_inc_node_tmpref(struct binder_node *node)
1578 {
1579 binder_node_lock(node);
1580 if (node->proc)
1581 binder_inner_proc_lock(node->proc);
1582 else
1583 spin_lock(&binder_dead_nodes_lock);
1584 binder_inc_node_tmpref_ilocked(node);
1585 if (node->proc)
1586 binder_inner_proc_unlock(node->proc);
1587 else
1588 spin_unlock(&binder_dead_nodes_lock);
1589 binder_node_unlock(node);
1590 }
1591
1592 /**
1593 * binder_dec_node_tmpref() - remove a temporary reference on node
1594 * @node: node to reference
1595 *
1596 * Release temporary reference on node taken via binder_inc_node_tmpref()
1597 */
1598 static void binder_dec_node_tmpref(struct binder_node *node)
1599 {
1600 bool free_node;
1601
1602 binder_node_inner_lock(node);
1603 if (!node->proc)
1604 spin_lock(&binder_dead_nodes_lock);
1605 node->tmp_refs--;
1606 BUG_ON(node->tmp_refs < 0);
1607 if (!node->proc)
1608 spin_unlock(&binder_dead_nodes_lock);
1609 /*
1610 * Call binder_dec_node() to check if all refcounts are 0
1611 * and cleanup is needed. Calling with strong=0 and internal=1
1612 * causes no actual reference to be released in binder_dec_node().
1613 * If that changes, a change is needed here too.
1614 */
1615 free_node = binder_dec_node_nilocked(node, 0, 1);
1616 binder_node_inner_unlock(node);
1617 if (free_node)
1618 binder_free_node(node);
1619 }
1620
1621 static void binder_put_node(struct binder_node *node)
1622 {
1623 binder_dec_node_tmpref(node);
1624 }
1625
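/*
 * Example (illustrative only): the temporary-reference pattern expected of
 * binder_get_node() callers; the node cannot be freed between the lookup
 * and the matching binder_put_node().
 *
 *	node = binder_get_node(proc, fp->binder);
 *	if (node) {
 *		... use node ...
 *		binder_put_node(node);
 *	}
 */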
1626 static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
1627 u32 desc, bool need_strong_ref)
1628 {
1629 struct rb_node *n = proc->refs_by_desc.rb_node;
1630 struct binder_ref *ref;
1631
1632 while (n) {
1633 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1634
1635 if (desc < ref->data.desc) {
1636 n = n->rb_left;
1637 } else if (desc > ref->data.desc) {
1638 n = n->rb_right;
1639 } else if (need_strong_ref && !ref->data.strong) {
1640 binder_user_error("tried to use weak ref as strong ref\n");
1641 return NULL;
1642 } else {
1643 return ref;
1644 }
1645 }
1646 return NULL;
1647 }
1648
1649 /**
1650 * binder_get_ref_for_node_olocked() - get the ref associated with given node
1651 * @proc: binder_proc that owns the ref
1652 * @node: binder_node of target
1653 * @new_ref: newly allocated binder_ref to be initialized or %NULL
1654 *
1655 * Look up the ref for the given node and return it if it exists
1656 *
1657 * If it doesn't exist and the caller provides a newly allocated
1658 * ref, initialize the fields of the newly allocated ref and insert
1659 * into the given proc rb_trees and node refs list.
1660 *
1661 * Return: the ref for node. It is possible that another thread
1662 * allocated/initialized the ref first in which case the
1663 * returned ref would be different from the passed-in
1664 * new_ref. new_ref must be kfree'd by the caller in
1665 * this case.
1666 */
1667 static struct binder_ref *binder_get_ref_for_node_olocked(
1668 struct binder_proc *proc,
1669 struct binder_node *node,
1670 struct binder_ref *new_ref)
1671 {
1672 struct binder_context *context = proc->context;
1673 struct rb_node **p = &proc->refs_by_node.rb_node;
1674 struct rb_node *parent = NULL;
1675 struct binder_ref *ref;
1676 struct rb_node *n;
1677
1678 while (*p) {
1679 parent = *p;
1680 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1681
1682 if (node < ref->node)
1683 p = &(*p)->rb_left;
1684 else if (node > ref->node)
1685 p = &(*p)->rb_right;
1686 else
1687 return ref;
1688 }
1689 if (!new_ref)
1690 return NULL;
1691
1692 binder_stats_created(BINDER_STAT_REF);
1693 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1694 new_ref->proc = proc;
1695 new_ref->node = node;
1696 rb_link_node(&new_ref->rb_node_node, parent, p);
1697 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1698
1699 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1700 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1701 ref = rb_entry(n, struct binder_ref, rb_node_desc);
1702 if (ref->data.desc > new_ref->data.desc)
1703 break;
1704 new_ref->data.desc = ref->data.desc + 1;
1705 }
1706
1707 p = &proc->refs_by_desc.rb_node;
1708 while (*p) {
1709 parent = *p;
1710 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1711
1712 if (new_ref->data.desc < ref->data.desc)
1713 p = &(*p)->rb_left;
1714 else if (new_ref->data.desc > ref->data.desc)
1715 p = &(*p)->rb_right;
1716 else
1717 BUG();
1718 }
1719 rb_link_node(&new_ref->rb_node_desc, parent, p);
1720 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1721
1722 binder_node_lock(node);
1723 hlist_add_head(&new_ref->node_entry, &node->refs);
1724
1725 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1726 "%d new ref %d desc %d for node %d\n",
1727 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1728 node->debug_id);
1729 binder_node_unlock(node);
1730 return new_ref;
1731 }
1732
1733 static void binder_cleanup_ref_olocked(struct binder_ref *ref)
1734 {
1735 bool delete_node = false;
1736
1737 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1738 "%d delete ref %d desc %d for node %d\n",
1739 ref->proc->pid, ref->data.debug_id, ref->data.desc,
1740 ref->node->debug_id);
1741
1742 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1743 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
1744
1745 binder_node_inner_lock(ref->node);
1746 if (ref->data.strong)
1747 binder_dec_node_nilocked(ref->node, 1, 1);
1748
1749 hlist_del(&ref->node_entry);
1750 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1751 binder_node_inner_unlock(ref->node);
1752 /*
1753 * Clear ref->node unless we want the caller to free the node
1754 */
1755 if (!delete_node) {
1756 /*
1757 * The caller uses ref->node to determine
1758 * whether the node needs to be freed. Clear
1759 * it since the node is still alive.
1760 */
1761 ref->node = NULL;
1762 }
1763
1764 if (ref->death) {
1765 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1766 "%d delete ref %d desc %d has death notification\n",
1767 ref->proc->pid, ref->data.debug_id,
1768 ref->data.desc);
1769 binder_dequeue_work(ref->proc, &ref->death->work);
1770 binder_stats_deleted(BINDER_STAT_DEATH);
1771 }
1772 binder_stats_deleted(BINDER_STAT_REF);
1773 }
1774
1775 /**
1776 * binder_inc_ref_olocked() - increment the ref for given handle
1777 * @ref: ref to be incremented
1778 * @strong: if true, strong increment, else weak
1779 * @target_list: list to queue node work on
1780 *
1781 * Increment the ref. @ref->proc->outer_lock must be held on entry
1782 *
1783 * Return: 0, if successful, else errno
1784 */
1785 static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1786 struct list_head *target_list)
1787 {
1788 int ret;
1789
1790 if (strong) {
1791 if (ref->data.strong == 0) {
1792 ret = binder_inc_node(ref->node, 1, 1, target_list);
1793 if (ret)
1794 return ret;
1795 }
1796 ref->data.strong++;
1797 } else {
1798 if (ref->data.weak == 0) {
1799 ret = binder_inc_node(ref->node, 0, 1, target_list);
1800 if (ret)
1801 return ret;
1802 }
1803 ref->data.weak++;
1804 }
1805 return 0;
1806 }
1807
1808 /**
1809 * binder_dec_ref_olocked() - dec the ref for given handle
1810 * @ref: ref to be decremented
1811 * @strong: if true, strong decrement, else weak
1812 *
1813 * Decrement the ref.
1814 *
1815 * Return: true if ref is cleaned up and ready to be freed
1816 */
1817 static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
1818 {
1819 if (strong) {
1820 if (ref->data.strong == 0) {
1821 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
1822 ref->proc->pid, ref->data.debug_id,
1823 ref->data.desc, ref->data.strong,
1824 ref->data.weak);
1825 return false;
1826 }
1827 ref->data.strong--;
1828 if (ref->data.strong == 0)
1829 binder_dec_node(ref->node, strong, 1);
1830 } else {
1831 if (ref->data.weak == 0) {
1832 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
1833 ref->proc->pid, ref->data.debug_id,
1834 ref->data.desc, ref->data.strong,
1835 ref->data.weak);
1836 return false;
1837 }
1838 ref->data.weak--;
1839 }
1840 if (ref->data.strong == 0 && ref->data.weak == 0) {
1841 binder_cleanup_ref_olocked(ref);
1842 return true;
1843 }
1844 return false;
1845 }
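/*
 * Note: when both counts reach zero, binder_dec_ref_olocked() calls
 * binder_cleanup_ref_olocked() and returns true.  The ref has then been
 * unhooked from the proc rb-trees and the node's ref list, but the
 * caller still owns the memory and must free it with binder_free_ref()
 * once the proc lock has been dropped (see binder_update_ref_for_handle()).
 */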
1846
1847 /**
1848 * binder_get_node_from_ref() - get the node from the given proc/desc
1849 * @proc: proc containing the ref
1850 * @desc: the handle associated with the ref
1851 * @need_strong_ref: if true, only return node if ref is strong
1852 * @rdata: the id/refcount data for the ref
1853 *
1854 * Given a proc and ref handle, return the associated binder_node
1855 *
1856 * Return: a binder_node, or NULL if the ref was not found or a strong ref was required but the ref holds no strong count
1857 */
1858 static struct binder_node *binder_get_node_from_ref(
1859 struct binder_proc *proc,
1860 u32 desc, bool need_strong_ref,
1861 struct binder_ref_data *rdata)
1862 {
1863 struct binder_node *node;
1864 struct binder_ref *ref;
1865
1866 binder_proc_lock(proc);
1867 ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
1868 if (!ref)
1869 goto err_no_ref;
1870 node = ref->node;
1871 /*
1872 * Take an implicit reference on the node to ensure
1873 * it stays alive until the call to binder_put_node()
1874 */
1875 binder_inc_node_tmpref(node);
1876 if (rdata)
1877 *rdata = ref->data;
1878 binder_proc_unlock(proc);
1879
1880 return node;
1881
1882 err_no_ref:
1883 binder_proc_unlock(proc);
1884 return NULL;
1885 }
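/*
 * Illustrative sketch (not part of the driver): callers of
 * binder_get_node_from_ref() are expected to pair it with
 * binder_put_node() to drop the temporary node reference, e.g.:
 *
 *	struct binder_ref_data rdata;
 *	struct binder_node *node;
 *
 *	node = binder_get_node_from_ref(proc, handle, true, &rdata);
 *	if (!node)
 *		return -EINVAL;
 *	// ... use node ...
 *	binder_put_node(node);
 *
 * binder_translate_handle() below follows this pattern.
 */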
1886
1887 /**
1888 * binder_free_ref() - free the binder_ref
1889 * @ref: ref to free
1890 *
1891 * Free the binder_ref. Free the binder_node indicated by ref->node
1892 * (if non-NULL) and the binder_ref_death indicated by ref->death.
1893 */
1894 static void binder_free_ref(struct binder_ref *ref)
1895 {
1896 if (ref->node)
1897 binder_free_node(ref->node);
1898 kfree(ref->death);
1899 kfree(ref);
1900 }
1901
1902 /**
1903 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1904 * @proc: proc containing the ref
1905 * @desc: the handle associated with the ref
1906 * @increment: true=inc reference, false=dec reference
1907 * @strong: true=strong reference, false=weak reference
1908 * @rdata: the id/refcount data for the ref
1909 *
1910 * Given a proc and ref handle, increment or decrement the ref
1911 * according to "increment" arg.
1912 *
1913 * Return: 0 if successful, else errno
1914 */
1915 static int binder_update_ref_for_handle(struct binder_proc *proc,
1916 uint32_t desc, bool increment, bool strong,
1917 struct binder_ref_data *rdata)
1918 {
1919 int ret = 0;
1920 struct binder_ref *ref;
1921 bool delete_ref = false;
1922
1923 binder_proc_lock(proc);
1924 ref = binder_get_ref_olocked(proc, desc, strong);
1925 if (!ref) {
1926 ret = -EINVAL;
1927 goto err_no_ref;
1928 }
1929 if (increment)
1930 ret = binder_inc_ref_olocked(ref, strong, NULL);
1931 else
1932 delete_ref = binder_dec_ref_olocked(ref, strong);
1933
1934 if (rdata)
1935 *rdata = ref->data;
1936 binder_proc_unlock(proc);
1937
1938 if (delete_ref)
1939 binder_free_ref(ref);
1940 return ret;
1941
1942 err_no_ref:
1943 binder_proc_unlock(proc);
1944 return ret;
1945 }
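/*
 * Note: binder_update_ref_for_handle() backs the BC_INCREFS, BC_ACQUIRE,
 * BC_RELEASE and BC_DECREFS commands handled in binder_thread_write(),
 * e.g. BC_ACQUIRE -> increment=true, strong=true and
 * BC_DECREFS -> increment=false, strong=false.
 */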
1946
1947 /**
1948 * binder_dec_ref_for_handle() - dec the ref for given handle
1949 * @proc: proc containing the ref
1950 * @desc: the handle associated with the ref
1951 * @strong: true=strong reference, false=weak reference
1952 * @rdata: the id/refcount data for the ref
1953 *
1954 * Just calls binder_update_ref_for_handle() to decrement the ref.
1955 *
1956 * Return: 0 if successful, else errno
1957 */
1958 static int binder_dec_ref_for_handle(struct binder_proc *proc,
1959 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1960 {
1961 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1962 }
1963
1964
1965 /**
1966 * binder_inc_ref_for_node() - increment the ref for given proc/node
1967 * @proc: proc containing the ref
1968 * @node: target node
1969 * @strong: true=strong reference, false=weak reference
1970 * @target_list: worklist to use if node is incremented
1971 * @rdata: the id/refcount data for the ref
1972 *
1973 * Given a proc and node, increment the ref. Create the ref if it
1974 * doesn't already exist
1975 *
1976 * Return: 0 if successful, else errno
1977 */
1978 static int binder_inc_ref_for_node(struct binder_proc *proc,
1979 struct binder_node *node,
1980 bool strong,
1981 struct list_head *target_list,
1982 struct binder_ref_data *rdata)
1983 {
1984 struct binder_ref *ref;
1985 struct binder_ref *new_ref = NULL;
1986 int ret = 0;
1987
1988 binder_proc_lock(proc);
1989 ref = binder_get_ref_for_node_olocked(proc, node, NULL);
1990 if (!ref) {
1991 binder_proc_unlock(proc);
1992 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1993 if (!new_ref)
1994 return -ENOMEM;
1995 binder_proc_lock(proc);
1996 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1997 }
1998 ret = binder_inc_ref_olocked(ref, strong, target_list);
1999 *rdata = ref->data;
2000 binder_proc_unlock(proc);
2001 if (new_ref && ref != new_ref)
2002 /*
2003 * Another thread created the ref first so
2004 * free the one we allocated
2005 */
2006 kfree(new_ref);
2007 return ret;
2008 }
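/*
 * Note: binder_inc_ref_for_node() uses the usual allocate-outside-the-lock
 * pattern: if no ref exists it drops the proc lock, kzalloc()s a spare
 * binder_ref, retakes the lock and looks the ref up again.  If another
 * thread won the race and inserted a ref in the meantime, the spare
 * allocation is simply freed.
 */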
2009
2010 static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
2011 struct binder_transaction *t)
2012 {
2013 BUG_ON(!target_thread);
2014 assert_spin_locked(&target_thread->proc->inner_lock);
2015 BUG_ON(target_thread->transaction_stack != t);
2016 BUG_ON(target_thread->transaction_stack->from != target_thread);
2017 target_thread->transaction_stack =
2018 target_thread->transaction_stack->from_parent;
2019 t->from = NULL;
2020 }
2021
2022 /**
2023 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
2024 * @thread: thread to decrement
2025 *
2026 * A thread needs to be kept alive while being used to create or
2027 * handle a transaction. binder_get_txn_from() is used to safely
2028 * extract t->from from a binder_transaction and keep the thread
2029 * indicated by t->from from being freed. When done with that
2030 * binder_thread, this function is called to decrement the
2031 * tmp_ref and free if appropriate (thread has been released
2032 * and no transaction is being processed by the driver)
2033 */
2034 static void binder_thread_dec_tmpref(struct binder_thread *thread)
2035 {
2036 /*
2037 * The atomic alone protects the counter value; the inner lock is taken
2038 * here so the is_dead && tmp_ref == 0 check and the free stay consistent
2039 */
2040 binder_inner_proc_lock(thread->proc);
2041 atomic_dec(&thread->tmp_ref);
2042 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
2043 binder_inner_proc_unlock(thread->proc);
2044 binder_free_thread(thread);
2045 return;
2046 }
2047 binder_inner_proc_unlock(thread->proc);
2048 }
2049
2050 /**
2051 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
2052 * @proc: proc to decrement
2053 *
2054 * A binder_proc needs to be kept alive while being used to create or
2055 * handle a transaction. proc->tmp_ref is incremented when
2056 * creating a new transaction or when the binder_proc is currently in use
2057 * by threads that are being released. When done with the binder_proc,
2058 * this function is called to decrement the counter and free the
2059 * proc if appropriate (proc has been released, all threads have
2060 * been released and not currently in-use to process a transaction).
2061 */
2062 static void binder_proc_dec_tmpref(struct binder_proc *proc)
2063 {
2064 binder_inner_proc_lock(proc);
2065 proc->tmp_ref--;
2066 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
2067 !proc->tmp_ref) {
2068 binder_inner_proc_unlock(proc);
2069 binder_free_proc(proc);
2070 return;
2071 }
2072 binder_inner_proc_unlock(proc);
2073 }
2074
2075 /**
2076 * binder_get_txn_from() - safely extract the "from" thread in transaction
2077 * @t: binder transaction for t->from
2078 *
2079 * Atomically return the "from" thread and increment the tmp_ref
2080 * count for the thread to ensure it stays alive until
2081 * binder_thread_dec_tmpref() is called.
2082 *
2083 * Return: the value of t->from
2084 */
2085 static struct binder_thread *binder_get_txn_from(
2086 struct binder_transaction *t)
2087 {
2088 struct binder_thread *from;
2089
2090 spin_lock(&t->lock);
2091 from = t->from;
2092 if (from)
2093 atomic_inc(&from->tmp_ref);
2094 spin_unlock(&t->lock);
2095 return from;
2096 }
2097
2098 /**
2099 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
2100 * @t: binder transaction for t->from
2101 *
2102 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
2103 * to guarantee that the thread cannot be released while operating on it.
2104 * The caller must call binder_inner_proc_unlock() to release the inner lock
2105 * as well as call binder_thread_dec_tmpref() to release the reference.
2106 *
2107 * Return: the value of t->from
2108 */
2109 static struct binder_thread *binder_get_txn_from_and_acq_inner(
2110 struct binder_transaction *t)
2111 {
2112 struct binder_thread *from;
2113
2114 from = binder_get_txn_from(t);
2115 if (!from)
2116 return NULL;
2117 binder_inner_proc_lock(from->proc);
2118 if (t->from) {
2119 BUG_ON(from != t->from);
2120 return from;
2121 }
2122 binder_inner_proc_unlock(from->proc);
2123 binder_thread_dec_tmpref(from);
2124 return NULL;
2125 }
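/*
 * Illustrative sketch (not part of the driver): a caller of
 * binder_get_txn_from_and_acq_inner() must release both the inner lock
 * and the temporary thread reference, e.g.:
 *
 *	target = binder_get_txn_from_and_acq_inner(t);
 *	if (target) {
 *		// ... operate on target under target->proc->inner_lock ...
 *		binder_inner_proc_unlock(target->proc);
 *		binder_thread_dec_tmpref(target);
 *	}
 *
 * binder_send_failed_reply() below follows this pattern.
 */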
2126
2127 static void binder_free_transaction(struct binder_transaction *t)
2128 {
2129 if (t->buffer)
2130 t->buffer->transaction = NULL;
2131 kfree(t);
2132 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2133 }
2134
2135 static void binder_send_failed_reply(struct binder_transaction *t,
2136 uint32_t error_code)
2137 {
2138 struct binder_thread *target_thread;
2139 struct binder_transaction *next;
2140
2141 BUG_ON(t->flags & TF_ONE_WAY);
2142 while (1) {
2143 target_thread = binder_get_txn_from_and_acq_inner(t);
2144 if (target_thread) {
2145 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2146 "send failed reply for transaction %d to %d:%d\n",
2147 t->debug_id,
2148 target_thread->proc->pid,
2149 target_thread->pid);
2150
2151 binder_pop_transaction_ilocked(target_thread, t);
2152 if (target_thread->reply_error.cmd == BR_OK) {
2153 target_thread->reply_error.cmd = error_code;
2154 binder_enqueue_thread_work_ilocked(
2155 target_thread,
2156 &target_thread->reply_error.work);
2157 wake_up_interruptible(&target_thread->wait);
2158 } else {
2159 WARN(1, "Unexpected reply error: %u\n",
2160 target_thread->reply_error.cmd);
2161 }
2162 binder_inner_proc_unlock(target_thread->proc);
2163 binder_thread_dec_tmpref(target_thread);
2164 binder_free_transaction(t);
2165 return;
2166 }
2167 next = t->from_parent;
2168
2169 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
2170 "send failed reply for transaction %d, target dead\n",
2171 t->debug_id);
2172
2173 binder_free_transaction(t);
2174 if (next == NULL) {
2175 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2176 "reply failed, no target thread at root\n");
2177 return;
2178 }
2179 t = next;
2180 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2181 "reply failed, no target thread -- retry %d\n",
2182 t->debug_id);
2183 }
2184 }
2185
2186 /**
2187 * binder_cleanup_transaction() - cleans up undelivered transaction
2188 * @t: transaction that needs to be cleaned up
2189 * @reason: reason the transaction wasn't delivered
2190 * @error_code: error to return to caller (if synchronous call)
2191 */
2192 static void binder_cleanup_transaction(struct binder_transaction *t,
2193 const char *reason,
2194 uint32_t error_code)
2195 {
2196 if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
2197 binder_send_failed_reply(t, error_code);
2198 } else {
2199 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
2200 "undelivered transaction %d, %s\n",
2201 t->debug_id, reason);
2202 binder_free_transaction(t);
2203 }
2204 }
2205
2206 /**
2207 * binder_validate_object() - checks for a valid metadata object in a buffer.
2208 * @buffer: binder_buffer that we're parsing.
2209 * @offset: offset in the buffer at which to validate an object.
2210 *
2211 * Return: If there's a valid metadata object at @offset in @buffer, the
2212 * size of that object. Otherwise, it returns zero.
2213 */
2214 static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
2215 {
2216 /* Check if we can read a header first */
2217 struct binder_object_header *hdr;
2218 size_t object_size = 0;
2219
2220 if (offset > buffer->data_size - sizeof(*hdr) ||
2221 buffer->data_size < sizeof(*hdr) ||
2222 !IS_ALIGNED(offset, sizeof(u32)))
2223 return 0;
2224
2225 /* Ok, now see if we can read a complete object. */
2226 hdr = (struct binder_object_header *)(buffer->data + offset);
2227 switch (hdr->type) {
2228 case BINDER_TYPE_BINDER:
2229 case BINDER_TYPE_WEAK_BINDER:
2230 case BINDER_TYPE_HANDLE:
2231 case BINDER_TYPE_WEAK_HANDLE:
2232 object_size = sizeof(struct flat_binder_object);
2233 break;
2234 case BINDER_TYPE_FD:
2235 object_size = sizeof(struct binder_fd_object);
2236 break;
2237 case BINDER_TYPE_PTR:
2238 object_size = sizeof(struct binder_buffer_object);
2239 break;
2240 case BINDER_TYPE_FDA:
2241 object_size = sizeof(struct binder_fd_array_object);
2242 break;
2243 default:
2244 return 0;
2245 }
2246 if (offset <= buffer->data_size - object_size &&
2247 buffer->data_size >= object_size)
2248 return object_size;
2249 else
2250 return 0;
2251 }
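/*
 * Note: in binder_validate_object() the explicit
 * "buffer->data_size < sizeof(*hdr)" test guards the unsigned
 * subtraction in the first comparison against underflow, and the final
 * check confirms that the full object (not just its header) fits inside
 * the buffer.
 */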
2252
2253 /**
2254 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
2255 * @b: binder_buffer containing the object
2256 * @index: index in offset array at which the binder_buffer_object is
2257 * located
2258 * @start: points to the start of the offset array
2259 * @num_valid: the number of valid offsets in the offset array
2260 *
2261 * Return: If @index is within the valid range of the offset array
2262 * described by @start and @num_valid, and if there's a valid
2263 * binder_buffer_object at the offset found in index @index
2264 * of the offset array, that object is returned. Otherwise,
2265 * %NULL is returned.
2266 * Note that the offset found in index @index itself is not
2267 * verified; this function assumes that @num_valid elements
2268 * from @start were previously verified to have valid offsets.
2269 */
2270 static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
2271 binder_size_t index,
2272 binder_size_t *start,
2273 binder_size_t num_valid)
2274 {
2275 struct binder_buffer_object *buffer_obj;
2276 binder_size_t *offp;
2277
2278 if (index >= num_valid)
2279 return NULL;
2280
2281 offp = start + index;
2282 buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
2283 if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
2284 return NULL;
2285
2286 return buffer_obj;
2287 }
2288
2289 /**
2290 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2291 * @b: transaction buffer
2292 * @objects_start: start of objects buffer
2293 * @buffer: binder_buffer_object in which to fix up
2294 * @fixup_offset: start offset in @buffer to fix up
2295 * @last_obj: last binder_buffer_object that we fixed up in
2296 * @last_min_offset: minimum fixup offset in @last_obj
2297 *
2298 * Return: %true if a fixup in buffer @buffer at offset @fixup_offset is
2299 * allowed.
2300 *
2301 * For safety reasons, we only allow fixups inside a buffer to happen
2302 * at increasing offsets; additionally, we only allow fixup on the last
2303 * buffer object that was verified, or one of its parents.
2304 *
2305 * Example of what is allowed:
2306 *
2307 * A
2308 * B (parent = A, offset = 0)
2309 * C (parent = A, offset = 16)
2310 * D (parent = C, offset = 0)
2311 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2312 *
2313 * Examples of what is not allowed:
2314 *
2315 * Decreasing offsets within the same parent:
2316 * A
2317 * C (parent = A, offset = 16)
2318 * B (parent = A, offset = 0) // decreasing offset within A
2319 *
2320 * Referring to a parent that wasn't the last object or any of its parents:
2321 * A
2322 * B (parent = A, offset = 0)
2323 * C (parent = A, offset = 0)
2324 * C (parent = A, offset = 16)
2325 * D (parent = B, offset = 0) // B is not A or any of A's parents
2326 */
2327 static bool binder_validate_fixup(struct binder_buffer *b,
2328 binder_size_t *objects_start,
2329 struct binder_buffer_object *buffer,
2330 binder_size_t fixup_offset,
2331 struct binder_buffer_object *last_obj,
2332 binder_size_t last_min_offset)
2333 {
2334 if (!last_obj) {
2335 /* No verified buffer object to fix up in yet */
2336 return false;
2337 }
2338
2339 while (last_obj != buffer) {
2340 /*
2341 * Safe to retrieve the parent of last_obj, since it
2342 * was already previously verified by the driver.
2343 */
2344 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
2345 return false;
2346 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
2347 last_obj = (struct binder_buffer_object *)
2348 (b->data + *(objects_start + last_obj->parent));
2349 }
2350 return (fixup_offset >= last_min_offset);
2351 }
2352
2353 static void binder_transaction_buffer_release(struct binder_proc *proc,
2354 struct binder_buffer *buffer,
2355 binder_size_t *failed_at)
2356 {
2357 binder_size_t *offp, *off_start, *off_end;
2358 int debug_id = buffer->debug_id;
2359
2360 binder_debug(BINDER_DEBUG_TRANSACTION,
2361 "%d buffer release %d, size %zd-%zd, failed at %p\n",
2362 proc->pid, buffer->debug_id,
2363 buffer->data_size, buffer->offsets_size, failed_at);
2364
2365 if (buffer->target_node)
2366 binder_dec_node(buffer->target_node, 1, 0);
2367
2368 off_start = (binder_size_t *)(buffer->data +
2369 ALIGN(buffer->data_size, sizeof(void *)));
2370 if (failed_at)
2371 off_end = failed_at;
2372 else
2373 off_end = (void *)off_start + buffer->offsets_size;
2374 for (offp = off_start; offp < off_end; offp++) {
2375 struct binder_object_header *hdr;
2376 size_t object_size = binder_validate_object(buffer, *offp);
2377
2378 if (object_size == 0) {
2379 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2380 debug_id, (u64)*offp, buffer->data_size);
2381 continue;
2382 }
2383 hdr = (struct binder_object_header *)(buffer->data + *offp);
2384 switch (hdr->type) {
2385 case BINDER_TYPE_BINDER:
2386 case BINDER_TYPE_WEAK_BINDER: {
2387 struct flat_binder_object *fp;
2388 struct binder_node *node;
2389
2390 fp = to_flat_binder_object(hdr);
2391 node = binder_get_node(proc, fp->binder);
2392 if (node == NULL) {
2393 pr_err("transaction release %d bad node %016llx\n",
2394 debug_id, (u64)fp->binder);
2395 break;
2396 }
2397 binder_debug(BINDER_DEBUG_TRANSACTION,
2398 " node %d u%016llx\n",
2399 node->debug_id, (u64)node->ptr);
2400 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2401 0);
2402 binder_put_node(node);
2403 } break;
2404 case BINDER_TYPE_HANDLE:
2405 case BINDER_TYPE_WEAK_HANDLE: {
2406 struct flat_binder_object *fp;
2407 struct binder_ref_data rdata;
2408 int ret;
2409
2410 fp = to_flat_binder_object(hdr);
2411 ret = binder_dec_ref_for_handle(proc, fp->handle,
2412 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2413
2414 if (ret) {
2415 pr_err("transaction release %d bad handle %d, ret = %d\n",
2416 debug_id, fp->handle, ret);
2417 break;
2418 }
2419 binder_debug(BINDER_DEBUG_TRANSACTION,
2420 " ref %d desc %d\n",
2421 rdata.debug_id, rdata.desc);
2422 } break;
2423
2424 case BINDER_TYPE_FD: {
2425 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2426
2427 binder_debug(BINDER_DEBUG_TRANSACTION,
2428 " fd %d\n", fp->fd);
2429 if (failed_at)
2430 task_close_fd(proc, fp->fd);
2431 } break;
2432 case BINDER_TYPE_PTR:
2433 /*
2434 * Nothing to do here, this will get cleaned up when the
2435 * transaction buffer gets freed
2436 */
2437 break;
2438 case BINDER_TYPE_FDA: {
2439 struct binder_fd_array_object *fda;
2440 struct binder_buffer_object *parent;
2441 uintptr_t parent_buffer;
2442 u32 *fd_array;
2443 size_t fd_index;
2444 binder_size_t fd_buf_size;
2445
2446 fda = to_binder_fd_array_object(hdr);
2447 parent = binder_validate_ptr(buffer, fda->parent,
2448 off_start,
2449 offp - off_start);
2450 if (!parent) {
2451 pr_err("transaction release %d bad parent offset",
2452 debug_id);
2453 continue;
2454 }
2455 /*
2456 * Since the parent was already fixed up, convert it
2457 * back to kernel address space to access it
2458 */
2459 parent_buffer = parent->buffer -
2460 binder_alloc_get_user_buffer_offset(
2461 &proc->alloc);
2462
2463 fd_buf_size = sizeof(u32) * fda->num_fds;
2464 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2465 pr_err("transaction release %d invalid number of fds (%lld)\n",
2466 debug_id, (u64)fda->num_fds);
2467 continue;
2468 }
2469 if (fd_buf_size > parent->length ||
2470 fda->parent_offset > parent->length - fd_buf_size) {
2471 /* No space for all file descriptors here. */
2472 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2473 debug_id, (u64)fda->num_fds);
2474 continue;
2475 }
2476 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2477 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2478 task_close_fd(proc, fd_array[fd_index]);
2479 } break;
2480 default:
2481 pr_err("transaction release %d bad object type %x\n",
2482 debug_id, hdr->type);
2483 break;
2484 }
2485 }
2486 }
2487
2488 static int binder_translate_binder(struct flat_binder_object *fp,
2489 struct binder_transaction *t,
2490 struct binder_thread *thread)
2491 {
2492 struct binder_node *node;
2493 struct binder_proc *proc = thread->proc;
2494 struct binder_proc *target_proc = t->to_proc;
2495 struct binder_ref_data rdata;
2496 int ret = 0;
2497
2498 node = binder_get_node(proc, fp->binder);
2499 if (!node) {
2500 node = binder_new_node(proc, fp);
2501 if (!node)
2502 return -ENOMEM;
2503 }
2504 if (fp->cookie != node->cookie) {
2505 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2506 proc->pid, thread->pid, (u64)fp->binder,
2507 node->debug_id, (u64)fp->cookie,
2508 (u64)node->cookie);
2509 ret = -EINVAL;
2510 goto done;
2511 }
2512 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2513 ret = -EPERM;
2514 goto done;
2515 }
2516
2517 ret = binder_inc_ref_for_node(target_proc, node,
2518 fp->hdr.type == BINDER_TYPE_BINDER,
2519 &thread->todo, &rdata);
2520 if (ret)
2521 goto done;
2522
2523 if (fp->hdr.type == BINDER_TYPE_BINDER)
2524 fp->hdr.type = BINDER_TYPE_HANDLE;
2525 else
2526 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2527 fp->binder = 0;
2528 fp->handle = rdata.desc;
2529 fp->cookie = 0;
2530
2531 trace_binder_transaction_node_to_ref(t, node, &rdata);
2532 binder_debug(BINDER_DEBUG_TRANSACTION,
2533 " node %d u%016llx -> ref %d desc %d\n",
2534 node->debug_id, (u64)node->ptr,
2535 rdata.debug_id, rdata.desc);
2536 done:
2537 binder_put_node(node);
2538 return ret;
2539 }
2540
2541 static int binder_translate_handle(struct flat_binder_object *fp,
2542 struct binder_transaction *t,
2543 struct binder_thread *thread)
2544 {
2545 struct binder_proc *proc = thread->proc;
2546 struct binder_proc *target_proc = t->to_proc;
2547 struct binder_node *node;
2548 struct binder_ref_data src_rdata;
2549 int ret = 0;
2550
2551 node = binder_get_node_from_ref(proc, fp->handle,
2552 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2553 if (!node) {
2554 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2555 proc->pid, thread->pid, fp->handle);
2556 return -EINVAL;
2557 }
2558 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2559 ret = -EPERM;
2560 goto done;
2561 }
2562
2563 binder_node_lock(node);
2564 if (node->proc == target_proc) {
2565 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2566 fp->hdr.type = BINDER_TYPE_BINDER;
2567 else
2568 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2569 fp->binder = node->ptr;
2570 fp->cookie = node->cookie;
2571 if (node->proc)
2572 binder_inner_proc_lock(node->proc);
2573 binder_inc_node_nilocked(node,
2574 fp->hdr.type == BINDER_TYPE_BINDER,
2575 0, NULL);
2576 if (node->proc)
2577 binder_inner_proc_unlock(node->proc);
2578 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2579 binder_debug(BINDER_DEBUG_TRANSACTION,
2580 " ref %d desc %d -> node %d u%016llx\n",
2581 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2582 (u64)node->ptr);
2583 binder_node_unlock(node);
2584 } else {
2585 struct binder_ref_data dest_rdata;
2586
2587 binder_node_unlock(node);
2588 ret = binder_inc_ref_for_node(target_proc, node,
2589 fp->hdr.type == BINDER_TYPE_HANDLE,
2590 NULL, &dest_rdata);
2591 if (ret)
2592 goto done;
2593
2594 fp->binder = 0;
2595 fp->handle = dest_rdata.desc;
2596 fp->cookie = 0;
2597 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2598 &dest_rdata);
2599 binder_debug(BINDER_DEBUG_TRANSACTION,
2600 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2601 src_rdata.debug_id, src_rdata.desc,
2602 dest_rdata.debug_id, dest_rdata.desc,
2603 node->debug_id);
2604 }
2605 done:
2606 binder_put_node(node);
2607 return ret;
2608 }
2609
2610 static int binder_translate_fd(int fd,
2611 struct binder_transaction *t,
2612 struct binder_thread *thread,
2613 struct binder_transaction *in_reply_to)
2614 {
2615 struct binder_proc *proc = thread->proc;
2616 struct binder_proc *target_proc = t->to_proc;
2617 int target_fd;
2618 struct file *file;
2619 int ret;
2620 bool target_allows_fd;
2621
2622 if (in_reply_to)
2623 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2624 else
2625 target_allows_fd = t->buffer->target_node->accept_fds;
2626 if (!target_allows_fd) {
2627 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2628 proc->pid, thread->pid,
2629 in_reply_to ? "reply" : "transaction",
2630 fd);
2631 ret = -EPERM;
2632 goto err_fd_not_accepted;
2633 }
2634
2635 file = fget(fd);
2636 if (!file) {
2637 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2638 proc->pid, thread->pid, fd);
2639 ret = -EBADF;
2640 goto err_fget;
2641 }
2642 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2643 if (ret < 0) {
2644 ret = -EPERM;
2645 goto err_security;
2646 }
2647
2648 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2649 if (target_fd < 0) {
2650 ret = -ENOMEM;
2651 goto err_get_unused_fd;
2652 }
2653 task_fd_install(target_proc, target_fd, file);
2654 trace_binder_transaction_fd(t, fd, target_fd);
2655 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
2656 fd, target_fd);
2657
2658 return target_fd;
2659
2660 err_get_unused_fd:
2661 err_security:
2662 fput(file);
2663 err_fget:
2664 err_fd_not_accepted:
2665 return ret;
2666 }
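/*
 * Note: binder_translate_fd() installs a duplicate of the sender's file
 * into the target process' fd table with O_CLOEXEC set; if obtaining a
 * target fd or the security check fails, the reference taken by fget()
 * is dropped before the error is returned.
 */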
2667
2668 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2669 struct binder_buffer_object *parent,
2670 struct binder_transaction *t,
2671 struct binder_thread *thread,
2672 struct binder_transaction *in_reply_to)
2673 {
2674 binder_size_t fdi, fd_buf_size, num_installed_fds;
2675 int target_fd;
2676 uintptr_t parent_buffer;
2677 u32 *fd_array;
2678 struct binder_proc *proc = thread->proc;
2679 struct binder_proc *target_proc = t->to_proc;
2680
2681 fd_buf_size = sizeof(u32) * fda->num_fds;
2682 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2683 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2684 proc->pid, thread->pid, (u64)fda->num_fds);
2685 return -EINVAL;
2686 }
2687 if (fd_buf_size > parent->length ||
2688 fda->parent_offset > parent->length - fd_buf_size) {
2689 /* No space for all file descriptors here. */
2690 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2691 proc->pid, thread->pid, (u64)fda->num_fds);
2692 return -EINVAL;
2693 }
2694 /*
2695 * Since the parent was already fixed up, convert it
2696 * back to the kernel address space to access it
2697 */
2698 parent_buffer = parent->buffer -
2699 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
2700 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2701 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2702 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2703 proc->pid, thread->pid);
2704 return -EINVAL;
2705 }
2706 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2707 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
2708 in_reply_to);
2709 if (target_fd < 0)
2710 goto err_translate_fd_failed;
2711 fd_array[fdi] = target_fd;
2712 }
2713 return 0;
2714
2715 err_translate_fd_failed:
2716 /*
2717 * Failed to allocate fd or security error, free fds
2718 * installed so far.
2719 */
2720 num_installed_fds = fdi;
2721 for (fdi = 0; fdi < num_installed_fds; fdi++)
2722 task_close_fd(target_proc, fd_array[fdi]);
2723 return target_fd;
2724 }
2725
2726 static int binder_fixup_parent(struct binder_transaction *t,
2727 struct binder_thread *thread,
2728 struct binder_buffer_object *bp,
2729 binder_size_t *off_start,
2730 binder_size_t num_valid,
2731 struct binder_buffer_object *last_fixup_obj,
2732 binder_size_t last_fixup_min_off)
2733 {
2734 struct binder_buffer_object *parent;
2735 u8 *parent_buffer;
2736 struct binder_buffer *b = t->buffer;
2737 struct binder_proc *proc = thread->proc;
2738 struct binder_proc *target_proc = t->to_proc;
2739
2740 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2741 return 0;
2742
2743 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2744 if (!parent) {
2745 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2746 proc->pid, thread->pid);
2747 return -EINVAL;
2748 }
2749
2750 if (!binder_validate_fixup(b, off_start,
2751 parent, bp->parent_offset,
2752 last_fixup_obj,
2753 last_fixup_min_off)) {
2754 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2755 proc->pid, thread->pid);
2756 return -EINVAL;
2757 }
2758
2759 if (parent->length < sizeof(binder_uintptr_t) ||
2760 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2761 /* No space for a pointer here! */
2762 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2763 proc->pid, thread->pid);
2764 return -EINVAL;
2765 }
2766 parent_buffer = (u8 *)((uintptr_t)parent->buffer -
2767 binder_alloc_get_user_buffer_offset(
2768 &target_proc->alloc));
2769 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2770
2771 return 0;
2772 }
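/*
 * Note: binder_fixup_parent() patches the parent buffer object in place:
 * it writes bp->buffer (which at this point already holds the
 * target-process address of the child buffer) into the parent at
 * bp->parent_offset, going through the kernel-side mapping of the
 * target's buffer.
 */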
2773
2774 /**
2775 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2776 * @t: transaction to send
2777 * @proc: process to send the transaction to
2778 * @thread: thread in @proc to send the transaction to (may be NULL)
2779 *
2780 * This function queues a transaction to the specified process. It will try
2781 * to find a thread in the target process to handle the transaction and
2782 * wake it up. If no thread is found, the work is queued to the proc
2783 * todo list.
2784 *
2785 * If the @thread parameter is not NULL, the transaction is always queued
2786 * to the todo list of that specific thread.
2787 *
2788 * Return: true if the transaction was successfully queued
2789 * false if the target process or thread is dead
2790 */
2791 static bool binder_proc_transaction(struct binder_transaction *t,
2792 struct binder_proc *proc,
2793 struct binder_thread *thread)
2794 {
2795 struct binder_node *node = t->buffer->target_node;
2796 struct binder_priority node_prio;
2797 bool oneway = !!(t->flags & TF_ONE_WAY);
2798 bool pending_async = false;
2799
2800 BUG_ON(!node);
2801 binder_node_lock(node);
2802 node_prio.prio = node->min_priority;
2803 node_prio.sched_policy = node->sched_policy;
2804
2805 if (oneway) {
2806 BUG_ON(thread);
2807 if (node->has_async_transaction) {
2808 pending_async = true;
2809 } else {
2810 node->has_async_transaction = 1;
2811 }
2812 }
2813
2814 binder_inner_proc_lock(proc);
2815
2816 if (proc->is_dead || (thread && thread->is_dead)) {
2817 binder_inner_proc_unlock(proc);
2818 binder_node_unlock(node);
2819 return false;
2820 }
2821
2822 if (!thread && !pending_async)
2823 thread = binder_select_thread_ilocked(proc);
2824
2825 if (thread) {
2826 binder_transaction_priority(thread->task, t, node_prio,
2827 node->inherit_rt);
2828 binder_enqueue_thread_work_ilocked(thread, &t->work);
2829 } else if (!pending_async) {
2830 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2831 } else {
2832 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2833 }
2834
2835 if (!pending_async)
2836 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2837
2838 binder_inner_proc_unlock(proc);
2839 binder_node_unlock(node);
2840
2841 return true;
2842 }
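/*
 * Note: binder_proc_transaction() queues the work on one of three lists:
 * the chosen thread's todo list, the proc's todo list when no idle thread
 * was found, or the node's async_todo list when another oneway
 * transaction on the same node is still pending.  A wakeup is only issued
 * when the work was not deferred to async_todo.
 */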
2843
2844 /**
2845 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2846 * @node: struct binder_node for which to get refs
2847 * @procp: returns @node->proc if valid
2848 * @error: set to BR_DEAD_REPLY if @node->proc is NULL
2849 *
2850 * User-space normally keeps the node alive when creating a transaction
2851 * since it has a reference to the target. The local strong ref keeps it
2852 * alive if the sending process dies before the target process processes
2853 * the transaction. If the source process is malicious or has a reference
2854 * counting bug, relying on the local strong ref can fail.
2855 *
2856 * Since user-space can cause the local strong ref to go away, we also take
2857 * a tmpref on the node to ensure it survives while we are constructing
2858 * the transaction. We also need a tmpref on the proc while we are
2859 * constructing the transaction, so we take that here as well.
2860 *
2861 * Return: the target_node with refs taken, or NULL if @node->proc is NULL
2862 * (i.e. the target proc has died), in which case @error is set to
2863 * BR_DEAD_REPLY. Also sets @procp if a valid proc was found.
2864 */
2865 static struct binder_node *binder_get_node_refs_for_txn(
2866 struct binder_node *node,
2867 struct binder_proc **procp,
2868 uint32_t *error)
2869 {
2870 struct binder_node *target_node = NULL;
2871
2872 binder_node_inner_lock(node);
2873 if (node->proc) {
2874 target_node = node;
2875 binder_inc_node_nilocked(node, 1, 0, NULL);
2876 binder_inc_node_tmpref_ilocked(node);
2877 node->proc->tmp_ref++;
2878 *procp = node->proc;
2879 } else
2880 *error = BR_DEAD_REPLY;
2881 binder_node_inner_unlock(node);
2882
2883 return target_node;
2884 }
2885
2886 static void binder_transaction(struct binder_proc *proc,
2887 struct binder_thread *thread,
2888 struct binder_transaction_data *tr, int reply,
2889 binder_size_t extra_buffers_size)
2890 {
2891 int ret;
2892 struct binder_transaction *t;
2893 struct binder_work *tcomplete;
2894 binder_size_t *offp, *off_end, *off_start;
2895 binder_size_t off_min;
2896 u8 *sg_bufp, *sg_buf_end;
2897 struct binder_proc *target_proc = NULL;
2898 struct binder_thread *target_thread = NULL;
2899 struct binder_node *target_node = NULL;
2900 struct binder_transaction *in_reply_to = NULL;
2901 struct binder_transaction_log_entry *e;
2902 uint32_t return_error = 0;
2903 uint32_t return_error_param = 0;
2904 uint32_t return_error_line = 0;
2905 struct binder_buffer_object *last_fixup_obj = NULL;
2906 binder_size_t last_fixup_min_off = 0;
2907 struct binder_context *context = proc->context;
2908 int t_debug_id = atomic_inc_return(&binder_last_id);
2909 char *secctx = NULL;
2910 u32 secctx_sz = 0;
2911
2912 e = binder_transaction_log_add(&binder_transaction_log);
2913 e->debug_id = t_debug_id;
2914 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2915 e->from_proc = proc->pid;
2916 e->from_thread = thread->pid;
2917 e->target_handle = tr->target.handle;
2918 e->data_size = tr->data_size;
2919 e->offsets_size = tr->offsets_size;
2920 e->context_name = proc->context->name;
2921
2922 if (reply) {
2923 binder_inner_proc_lock(proc);
2924 in_reply_to = thread->transaction_stack;
2925 if (in_reply_to == NULL) {
2926 binder_inner_proc_unlock(proc);
2927 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2928 proc->pid, thread->pid);
2929 return_error = BR_FAILED_REPLY;
2930 return_error_param = -EPROTO;
2931 return_error_line = __LINE__;
2932 goto err_empty_call_stack;
2933 }
2934 if (in_reply_to->to_thread != thread) {
2935 spin_lock(&in_reply_to->lock);
2936 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2937 proc->pid, thread->pid, in_reply_to->debug_id,
2938 in_reply_to->to_proc ?
2939 in_reply_to->to_proc->pid : 0,
2940 in_reply_to->to_thread ?
2941 in_reply_to->to_thread->pid : 0);
2942 spin_unlock(&in_reply_to->lock);
2943 binder_inner_proc_unlock(proc);
2944 return_error = BR_FAILED_REPLY;
2945 return_error_param = -EPROTO;
2946 return_error_line = __LINE__;
2947 in_reply_to = NULL;
2948 goto err_bad_call_stack;
2949 }
2950 thread->transaction_stack = in_reply_to->to_parent;
2951 binder_inner_proc_unlock(proc);
2952 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2953 if (target_thread == NULL) {
2954 return_error = BR_DEAD_REPLY;
2955 return_error_line = __LINE__;
2956 goto err_dead_binder;
2957 }
2958 if (target_thread->transaction_stack != in_reply_to) {
2959 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2960 proc->pid, thread->pid,
2961 target_thread->transaction_stack ?
2962 target_thread->transaction_stack->debug_id : 0,
2963 in_reply_to->debug_id);
2964 binder_inner_proc_unlock(target_thread->proc);
2965 return_error = BR_FAILED_REPLY;
2966 return_error_param = -EPROTO;
2967 return_error_line = __LINE__;
2968 in_reply_to = NULL;
2969 target_thread = NULL;
2970 goto err_dead_binder;
2971 }
2972 target_proc = target_thread->proc;
2973 target_proc->tmp_ref++;
2974 binder_inner_proc_unlock(target_thread->proc);
2975 } else {
2976 if (tr->target.handle) {
2977 struct binder_ref *ref;
2978
2979 /*
2980 * There must already be a strong ref
2981 * on this node. If so, do a strong
2982 * increment on the node to ensure it
2983 * stays alive until the transaction is
2984 * done.
2985 */
2986 binder_proc_lock(proc);
2987 ref = binder_get_ref_olocked(proc, tr->target.handle,
2988 true);
2989 if (ref) {
2990 target_node = binder_get_node_refs_for_txn(
2991 ref->node, &target_proc,
2992 &return_error);
2993 } else {
2994 binder_user_error("%d:%d got transaction to invalid handle\n",
2995 proc->pid, thread->pid);
2996 return_error = BR_FAILED_REPLY;
2997 }
2998 binder_proc_unlock(proc);
2999 } else {
3000 mutex_lock(&context->context_mgr_node_lock);
3001 target_node = context->binder_context_mgr_node;
3002 if (target_node)
3003 target_node = binder_get_node_refs_for_txn(
3004 target_node, &target_proc,
3005 &return_error);
3006 else
3007 return_error = BR_DEAD_REPLY;
3008 mutex_unlock(&context->context_mgr_node_lock);
3009 }
3010 if (!target_node) {
3011 /*
3012 * return_error is set above
3013 */
3014 return_error_param = -EINVAL;
3015 return_error_line = __LINE__;
3016 goto err_dead_binder;
3017 }
3018 e->to_node = target_node->debug_id;
3019 #ifdef CONFIG_SAMSUNG_FREECESS
3020 if (target_proc
3021 && (target_proc->tsk->cred->euid.val > 10000)
3022 && (proc->pid != target_proc->pid)) {
3023 binder_report(proc->tsk, target_proc->tsk, tr->flags & TF_ONE_WAY);
3024 }
3025
3026 #endif
3027 if (security_binder_transaction(proc->tsk,
3028 target_proc->tsk) < 0) {
3029 return_error = BR_FAILED_REPLY;
3030 return_error_param = -EPERM;
3031 return_error_line = __LINE__;
3032 goto err_invalid_target_handle;
3033 }
3034 binder_inner_proc_lock(proc);
3035 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
3036 struct binder_transaction *tmp;
3037
3038 tmp = thread->transaction_stack;
3039 if (tmp->to_thread != thread) {
3040 spin_lock(&tmp->lock);
3041 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
3042 proc->pid, thread->pid, tmp->debug_id,
3043 tmp->to_proc ? tmp->to_proc->pid : 0,
3044 tmp->to_thread ?
3045 tmp->to_thread->pid : 0);
3046 spin_unlock(&tmp->lock);
3047 binder_inner_proc_unlock(proc);
3048 return_error = BR_FAILED_REPLY;
3049 return_error_param = -EPROTO;
3050 return_error_line = __LINE__;
3051 goto err_bad_call_stack;
3052 }
3053 while (tmp) {
3054 struct binder_thread *from;
3055
3056 spin_lock(&tmp->lock);
3057 from = tmp->from;
3058 if (from && from->proc == target_proc) {
3059 atomic_inc(&from->tmp_ref);
3060 target_thread = from;
3061 spin_unlock(&tmp->lock);
3062 break;
3063 }
3064 spin_unlock(&tmp->lock);
3065 tmp = tmp->from_parent;
3066 }
3067 }
3068 binder_inner_proc_unlock(proc);
3069 }
3070 if (target_thread)
3071 e->to_thread = target_thread->pid;
3072 e->to_proc = target_proc->pid;
3073
3074 /* TODO: reuse incoming transaction for reply */
3075 t = kzalloc(sizeof(*t), GFP_KERNEL);
3076 if (t == NULL) {
3077 return_error = BR_FAILED_REPLY;
3078 return_error_param = -ENOMEM;
3079 return_error_line = __LINE__;
3080 goto err_alloc_t_failed;
3081 }
3082 binder_stats_created(BINDER_STAT_TRANSACTION);
3083 spin_lock_init(&t->lock);
3084
3085 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3086 if (tcomplete == NULL) {
3087 return_error = BR_FAILED_REPLY;
3088 return_error_param = -ENOMEM;
3089 return_error_line = __LINE__;
3090 goto err_alloc_tcomplete_failed;
3091 }
3092 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3093
3094 t->debug_id = t_debug_id;
3095
3096 if (reply)
3097 binder_debug(BINDER_DEBUG_TRANSACTION,
3098 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3099 proc->pid, thread->pid, t->debug_id,
3100 target_proc->pid, target_thread->pid,
3101 (u64)tr->data.ptr.buffer,
3102 (u64)tr->data.ptr.offsets,
3103 (u64)tr->data_size, (u64)tr->offsets_size,
3104 (u64)extra_buffers_size);
3105 else
3106 binder_debug(BINDER_DEBUG_TRANSACTION,
3107 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3108 proc->pid, thread->pid, t->debug_id,
3109 target_proc->pid, target_node->debug_id,
3110 (u64)tr->data.ptr.buffer,
3111 (u64)tr->data.ptr.offsets,
3112 (u64)tr->data_size, (u64)tr->offsets_size,
3113 (u64)extra_buffers_size);
3114
3115 if (!reply && !(tr->flags & TF_ONE_WAY))
3116 t->from = thread;
3117 else
3118 t->from = NULL;
3119 t->sender_euid = task_euid(proc->tsk);
3120 t->to_proc = target_proc;
3121 t->to_thread = target_thread;
3122 t->code = tr->code;
3123 t->flags = tr->flags;
3124 if (!(t->flags & TF_ONE_WAY) &&
3125 binder_supported_policy(current->policy)) {
3126 /* Inherit supported policies for synchronous transactions */
3127 t->priority.sched_policy = current->policy;
3128 t->priority.prio = current->normal_prio;
3129 } else {
3130 /* Otherwise, fall back to the default priority */
3131 t->priority = target_proc->default_priority;
3132 }
3133 if (target_node && target_node->txn_security_ctx) {
3134 u32 secid;
3135
3136 security_task_getsecid(proc->tsk, &secid);
3137 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3138 if (ret) {
3139 return_error = BR_FAILED_REPLY;
3140 return_error_param = ret;
3141 return_error_line = __LINE__;
3142 goto err_get_secctx_failed;
3143 }
3144 extra_buffers_size += ALIGN(secctx_sz, sizeof(u64));
3145 }
3146
3147
3148 trace_binder_transaction(reply, t, target_node);
3149
3150 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3151 tr->offsets_size, extra_buffers_size,
3152 !reply && (t->flags & TF_ONE_WAY));
3153 if (IS_ERR(t->buffer)) {
3154 /*
3155 * -ESRCH indicates VMA cleared. The target is dying.
3156 */
3157 return_error_param = PTR_ERR(t->buffer);
3158 return_error = return_error_param == -ESRCH ?
3159 BR_DEAD_REPLY : BR_FAILED_REPLY;
3160 return_error_line = __LINE__;
3161 t->buffer = NULL;
3162 goto err_binder_alloc_buf_failed;
3163 }
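/*
 * Note: when a security context was requested, it is copied into the
 * tail end of the extra buffers area (extra_buffers_size was grown by
 * ALIGN(secctx_sz, sizeof(u64)) above), and t->security_ctx is set to
 * the target-process address of that copy.
 */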
3164 if (secctx) {
3165 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3166 ALIGN(tr->offsets_size, sizeof(void *)) +
3167 ALIGN(extra_buffers_size, sizeof(void *)) -
3168 ALIGN(secctx_sz, sizeof(u64));
3169 char *kptr = t->buffer->data + buf_offset;
3170
3171 t->security_ctx = (uintptr_t)kptr +
3172 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
3173 memcpy(kptr, secctx, secctx_sz);
3174 security_release_secctx(secctx, secctx_sz);
3175 secctx = NULL;
3176 }
3177
3178 t->buffer->debug_id = t->debug_id;
3179 t->buffer->transaction = t;
3180 t->buffer->target_node = target_node;
3181 trace_binder_transaction_alloc_buf(t->buffer);
3182 off_start = (binder_size_t *)(t->buffer->data +
3183 ALIGN(tr->data_size, sizeof(void *)));
3184 offp = off_start;
3185
3186 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
3187 tr->data.ptr.buffer, tr->data_size)) {
3188 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3189 proc->pid, thread->pid);
3190 return_error = BR_FAILED_REPLY;
3191 return_error_param = -EFAULT;
3192 return_error_line = __LINE__;
3193 goto err_copy_data_failed;
3194 }
3195 if (copy_from_user(offp, (const void __user *)(uintptr_t)
3196 tr->data.ptr.offsets, tr->offsets_size)) {
3197 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3198 proc->pid, thread->pid);
3199 return_error = BR_FAILED_REPLY;
3200 return_error_param = -EFAULT;
3201 return_error_line = __LINE__;
3202 goto err_copy_data_failed;
3203 }
3204 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3205 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3206 proc->pid, thread->pid, (u64)tr->offsets_size);
3207 return_error = BR_FAILED_REPLY;
3208 return_error_param = -EINVAL;
3209 return_error_line = __LINE__;
3210 goto err_bad_offset;
3211 }
3212 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3213 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3214 proc->pid, thread->pid,
3215 (u64)extra_buffers_size);
3216 return_error = BR_FAILED_REPLY;
3217 return_error_param = -EINVAL;
3218 return_error_line = __LINE__;
3219 goto err_bad_offset;
3220 }
3221 off_end = (void *)off_start + tr->offsets_size;
3222 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
3223 sg_buf_end = sg_bufp + extra_buffers_size;
3224 off_min = 0;
3225 for (; offp < off_end; offp++) {
3226 struct binder_object_header *hdr;
3227 size_t object_size = binder_validate_object(t->buffer, *offp);
3228
3229 if (object_size == 0 || *offp < off_min) {
3230 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3231 proc->pid, thread->pid, (u64)*offp,
3232 (u64)off_min,
3233 (u64)t->buffer->data_size);
3234 return_error = BR_FAILED_REPLY;
3235 return_error_param = -EINVAL;
3236 return_error_line = __LINE__;
3237 goto err_bad_offset;
3238 }
3239
3240 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
3241 off_min = *offp + object_size;
3242 switch (hdr->type) {
3243 case BINDER_TYPE_BINDER:
3244 case BINDER_TYPE_WEAK_BINDER: {
3245 struct flat_binder_object *fp;
3246
3247 fp = to_flat_binder_object(hdr);
3248 ret = binder_translate_binder(fp, t, thread);
3249 if (ret < 0) {
3250 return_error = BR_FAILED_REPLY;
3251 return_error_param = ret;
3252 return_error_line = __LINE__;
3253 goto err_translate_failed;
3254 }
3255 } break;
3256 case BINDER_TYPE_HANDLE:
3257 case BINDER_TYPE_WEAK_HANDLE: {
3258 struct flat_binder_object *fp;
3259
3260 fp = to_flat_binder_object(hdr);
3261 ret = binder_translate_handle(fp, t, thread);
3262 if (ret < 0) {
3263 return_error = BR_FAILED_REPLY;
3264 return_error_param = ret;
3265 return_error_line = __LINE__;
3266 goto err_translate_failed;
3267 }
3268 } break;
3269
3270 case BINDER_TYPE_FD: {
3271 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3272 int target_fd = binder_translate_fd(fp->fd, t, thread,
3273 in_reply_to);
3274
3275 if (target_fd < 0) {
3276 return_error = BR_FAILED_REPLY;
3277 return_error_param = target_fd;
3278 return_error_line = __LINE__;
3279 goto err_translate_failed;
3280 }
3281 fp->pad_binder = 0;
3282 fp->fd = target_fd;
3283 } break;
3284 case BINDER_TYPE_FDA: {
3285 struct binder_fd_array_object *fda =
3286 to_binder_fd_array_object(hdr);
3287 struct binder_buffer_object *parent =
3288 binder_validate_ptr(t->buffer, fda->parent,
3289 off_start,
3290 offp - off_start);
3291 if (!parent) {
3292 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3293 proc->pid, thread->pid);
3294 return_error = BR_FAILED_REPLY;
3295 return_error_param = -EINVAL;
3296 return_error_line = __LINE__;
3297 goto err_bad_parent;
3298 }
3299 if (!binder_validate_fixup(t->buffer, off_start,
3300 parent, fda->parent_offset,
3301 last_fixup_obj,
3302 last_fixup_min_off)) {
3303 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3304 proc->pid, thread->pid);
3305 return_error = BR_FAILED_REPLY;
3306 return_error_param = -EINVAL;
3307 return_error_line = __LINE__;
3308 goto err_bad_parent;
3309 }
3310 ret = binder_translate_fd_array(fda, parent, t, thread,
3311 in_reply_to);
3312 if (ret < 0) {
3313 return_error = BR_FAILED_REPLY;
3314 return_error_param = ret;
3315 return_error_line = __LINE__;
3316 goto err_translate_failed;
3317 }
3318 last_fixup_obj = parent;
3319 last_fixup_min_off =
3320 fda->parent_offset + sizeof(u32) * fda->num_fds;
3321 } break;
3322 case BINDER_TYPE_PTR: {
3323 struct binder_buffer_object *bp =
3324 to_binder_buffer_object(hdr);
3325 size_t buf_left = sg_buf_end - sg_bufp;
3326
3327 if (bp->length > buf_left) {
3328 binder_user_error("%d:%d got transaction with too large buffer\n",
3329 proc->pid, thread->pid);
3330 return_error = BR_FAILED_REPLY;
3331 return_error_param = -EINVAL;
3332 return_error_line = __LINE__;
3333 goto err_bad_offset;
3334 }
3335 if (copy_from_user(sg_bufp,
3336 (const void __user *)(uintptr_t)
3337 bp->buffer, bp->length)) {
3338 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3339 proc->pid, thread->pid);
3340 return_error_param = -EFAULT;
3341 return_error = BR_FAILED_REPLY;
3342 return_error_line = __LINE__;
3343 goto err_copy_data_failed;
3344 }
3345 /* Fixup buffer pointer to target proc address space */
3346 bp->buffer = (uintptr_t)sg_bufp +
3347 binder_alloc_get_user_buffer_offset(
3348 &target_proc->alloc);
3349 sg_bufp += ALIGN(bp->length, sizeof(u64));
3350
3351 ret = binder_fixup_parent(t, thread, bp, off_start,
3352 offp - off_start,
3353 last_fixup_obj,
3354 last_fixup_min_off);
3355 if (ret < 0) {
3356 return_error = BR_FAILED_REPLY;
3357 return_error_param = ret;
3358 return_error_line = __LINE__;
3359 goto err_translate_failed;
3360 }
3361 last_fixup_obj = bp;
3362 last_fixup_min_off = 0;
3363 } break;
3364 default:
3365 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3366 proc->pid, thread->pid, hdr->type);
3367 return_error = BR_FAILED_REPLY;
3368 return_error_param = -EINVAL;
3369 return_error_line = __LINE__;
3370 goto err_bad_object_type;
3371 }
3372 }
3373 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3374 t->work.type = BINDER_WORK_TRANSACTION;
3375
3376 if (reply) {
3377 binder_enqueue_thread_work(thread, tcomplete);
3378 binder_inner_proc_lock(target_proc);
3379 if (target_thread->is_dead) {
3380 binder_inner_proc_unlock(target_proc);
3381 goto err_dead_proc_or_thread;
3382 }
3383 BUG_ON(t->buffer->async_transaction != 0);
3384 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3385 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3386 binder_inner_proc_unlock(target_proc);
3387 wake_up_interruptible_sync(&target_thread->wait);
3388 binder_restore_priority(current, in_reply_to->saved_priority);
3389 binder_free_transaction(in_reply_to);
3390 } else if (!(t->flags & TF_ONE_WAY)) {
3391 BUG_ON(t->buffer->async_transaction != 0);
3392 binder_inner_proc_lock(proc);
3393 /*
3394 * Defer the TRANSACTION_COMPLETE, so we don't return to
3395 * userspace immediately; this allows the target process to
3396 * immediately start processing this transaction, reducing
3397 * latency. We will then return the TRANSACTION_COMPLETE when
3398 * the target replies (or there is an error).
3399 */
3400 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3401 t->need_reply = 1;
3402 t->from_parent = thread->transaction_stack;
3403 thread->transaction_stack = t;
3404 binder_inner_proc_unlock(proc);
3405 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3406 binder_inner_proc_lock(proc);
3407 binder_pop_transaction_ilocked(thread, t);
3408 binder_inner_proc_unlock(proc);
3409 goto err_dead_proc_or_thread;
3410 }
3411 } else {
3412 BUG_ON(target_node == NULL);
3413 BUG_ON(t->buffer->async_transaction != 1);
3414 binder_enqueue_thread_work(thread, tcomplete);
3415 if (!binder_proc_transaction(t, target_proc, NULL))
3416 goto err_dead_proc_or_thread;
3417 }
3418 if (target_thread)
3419 binder_thread_dec_tmpref(target_thread);
3420 binder_proc_dec_tmpref(target_proc);
3421 if (target_node)
3422 binder_dec_node_tmpref(target_node);
3423 /*
3424 * write barrier to synchronize with initialization
3425 * of log entry
3426 */
3427 smp_wmb();
3428 WRITE_ONCE(e->debug_id_done, t_debug_id);
3429 return;
3430
3431 err_dead_proc_or_thread:
3432 return_error = BR_DEAD_REPLY;
3433 return_error_line = __LINE__;
3434 binder_dequeue_work(proc, tcomplete);
3435 err_translate_failed:
3436 err_bad_object_type:
3437 err_bad_offset:
3438 err_bad_parent:
3439 err_copy_data_failed:
3440 trace_binder_transaction_failed_buffer_release(t->buffer);
3441 binder_transaction_buffer_release(target_proc, t->buffer, offp);
3442 if (target_node)
3443 binder_dec_node_tmpref(target_node);
3444 target_node = NULL;
3445 t->buffer->transaction = NULL;
3446 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3447 err_binder_alloc_buf_failed:
3448 if (secctx)
3449 security_release_secctx(secctx, secctx_sz);
3450 err_get_secctx_failed:
3451 kfree(tcomplete);
3452 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3453 err_alloc_tcomplete_failed:
3454 kfree(t);
3455 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3456 err_alloc_t_failed:
3457 err_bad_call_stack:
3458 err_empty_call_stack:
3459 err_dead_binder:
3460 err_invalid_target_handle:
3461 if (target_thread)
3462 binder_thread_dec_tmpref(target_thread);
3463 if (target_proc)
3464 binder_proc_dec_tmpref(target_proc);
3465 if (target_node) {
3466 binder_dec_node(target_node, 1, 0);
3467 binder_dec_node_tmpref(target_node);
3468 }
3469
3470 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3471 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3472 proc->pid, thread->pid, return_error, return_error_param,
3473 (u64)tr->data_size, (u64)tr->offsets_size,
3474 return_error_line);
3475
3476 {
3477 struct binder_transaction_log_entry *fe;
3478
3479 e->return_error = return_error;
3480 e->return_error_param = return_error_param;
3481 e->return_error_line = return_error_line;
3482 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3483 *fe = *e;
3484 /*
3485 * write barrier to synchronize with initialization
3486 * of log entry
3487 */
3488 smp_wmb();
3489 WRITE_ONCE(e->debug_id_done, t_debug_id);
3490 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3491 }
3492
3493 BUG_ON(thread->return_error.cmd != BR_OK);
3494 if (in_reply_to) {
3495 binder_restore_priority(current, in_reply_to->saved_priority);
3496 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3497 binder_enqueue_thread_work(thread, &thread->return_error.work);
3498 binder_send_failed_reply(in_reply_to, return_error);
3499 } else {
3500 thread->return_error.cmd = return_error;
3501 binder_enqueue_thread_work(thread, &thread->return_error.work);
3502 }
3503 }
3504
3505 static int binder_thread_write(struct binder_proc *proc,
3506 struct binder_thread *thread,
3507 binder_uintptr_t binder_buffer, size_t size,
3508 binder_size_t *consumed)
3509 {
3510 uint32_t cmd;
3511 struct binder_context *context = proc->context;
3512 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3513 void __user *ptr = buffer + *consumed;
3514 void __user *end = buffer + size;
3515
3516 while (ptr < end && thread->return_error.cmd == BR_OK) {
3517 int ret;
3518
3519 if (get_user(cmd, (uint32_t __user *)ptr))
3520 return -EFAULT;
3521 ptr += sizeof(uint32_t);
3522 trace_binder_command(cmd);
3523 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3524 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3525 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3526 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3527 }
3528 switch (cmd) {
3529 case BC_INCREFS:
3530 case BC_ACQUIRE:
3531 case BC_RELEASE:
3532 case BC_DECREFS: {
3533 uint32_t target;
3534 const char *debug_string;
3535 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3536 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3537 struct binder_ref_data rdata;
3538
3539 if (get_user(target, (uint32_t __user *)ptr))
3540 return -EFAULT;
3541
3542 ptr += sizeof(uint32_t);
3543 ret = -1;
3544 if (increment && !target) {
3545 struct binder_node *ctx_mgr_node;
3546 mutex_lock(&context->context_mgr_node_lock);
3547 ctx_mgr_node = context->binder_context_mgr_node;
3548 if (ctx_mgr_node)
3549 ret = binder_inc_ref_for_node(
3550 proc, ctx_mgr_node,
3551 strong, NULL, &rdata);
3552 mutex_unlock(&context->context_mgr_node_lock);
3553 }
3554 if (ret)
3555 ret = binder_update_ref_for_handle(
3556 proc, target, increment, strong,
3557 &rdata);
3558 if (!ret && rdata.desc != target) {
3559 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3560 proc->pid, thread->pid,
3561 target, rdata.desc);
3562 }
3563 switch (cmd) {
3564 case BC_INCREFS:
3565 debug_string = "IncRefs";
3566 break;
3567 case BC_ACQUIRE:
3568 debug_string = "Acquire";
3569 break;
3570 case BC_RELEASE:
3571 debug_string = "Release";
3572 break;
3573 case BC_DECREFS:
3574 default:
3575 debug_string = "DecRefs";
3576 break;
3577 }
3578 if (ret) {
3579 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3580 proc->pid, thread->pid, debug_string,
3581 strong, target, ret);
3582 break;
3583 }
3584 binder_debug(BINDER_DEBUG_USER_REFS,
3585 "%d:%d %s ref %d desc %d s %d w %d\n",
3586 proc->pid, thread->pid, debug_string,
3587 rdata.debug_id, rdata.desc, rdata.strong,
3588 rdata.weak);
3589 break;
3590 }
3591 case BC_INCREFS_DONE:
3592 case BC_ACQUIRE_DONE: {
3593 binder_uintptr_t node_ptr;
3594 binder_uintptr_t cookie;
3595 struct binder_node *node;
3596 bool free_node;
3597
3598 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3599 return -EFAULT;
3600 ptr += sizeof(binder_uintptr_t);
3601 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3602 return -EFAULT;
3603 ptr += sizeof(binder_uintptr_t);
3604 node = binder_get_node(proc, node_ptr);
3605 if (node == NULL) {
3606 binder_user_error("%d:%d %s u%016llx no match\n",
3607 proc->pid, thread->pid,
3608 cmd == BC_INCREFS_DONE ?
3609 "BC_INCREFS_DONE" :
3610 "BC_ACQUIRE_DONE",
3611 (u64)node_ptr);
3612 break;
3613 }
3614 if (cookie != node->cookie) {
3615 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3616 proc->pid, thread->pid,
3617 cmd == BC_INCREFS_DONE ?
3618 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3619 (u64)node_ptr, node->debug_id,
3620 (u64)cookie, (u64)node->cookie);
3621 binder_put_node(node);
3622 break;
3623 }
3624 binder_node_inner_lock(node);
3625 if (cmd == BC_ACQUIRE_DONE) {
3626 if (node->pending_strong_ref == 0) {
3627 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3628 proc->pid, thread->pid,
3629 node->debug_id);
3630 binder_node_inner_unlock(node);
3631 binder_put_node(node);
3632 break;
3633 }
3634 node->pending_strong_ref = 0;
3635 } else {
3636 if (node->pending_weak_ref == 0) {
3637 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3638 proc->pid, thread->pid,
3639 node->debug_id);
3640 binder_node_inner_unlock(node);
3641 binder_put_node(node);
3642 break;
3643 }
3644 node->pending_weak_ref = 0;
3645 }
3646 free_node = binder_dec_node_nilocked(node,
3647 cmd == BC_ACQUIRE_DONE, 0);
3648 WARN_ON(free_node);
3649 binder_debug(BINDER_DEBUG_USER_REFS,
3650 "%d:%d %s node %d ls %d lw %d tr %d\n",
3651 proc->pid, thread->pid,
3652 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3653 node->debug_id, node->local_strong_refs,
3654 node->local_weak_refs, node->tmp_refs);
3655 binder_node_inner_unlock(node);
3656 binder_put_node(node);
3657 break;
3658 }
3659 case BC_ATTEMPT_ACQUIRE:
3660 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3661 return -EINVAL;
3662 case BC_ACQUIRE_RESULT:
3663 pr_err("BC_ACQUIRE_RESULT not supported\n");
3664 return -EINVAL;
3665
3666 case BC_FREE_BUFFER: {
3667 binder_uintptr_t data_ptr;
3668 struct binder_buffer *buffer;
3669
3670 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3671 return -EFAULT;
3672 ptr += sizeof(binder_uintptr_t);
3673
3674 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3675 data_ptr);
3676 if (IS_ERR_OR_NULL(buffer)) {
3677 if (PTR_ERR(buffer) == -EPERM) {
3678 binder_user_error(
3679 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3680 proc->pid, thread->pid,
3681 (u64)data_ptr);
3682 } else {
3683 binder_user_error(
3684 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3685 proc->pid, thread->pid,
3686 (u64)data_ptr);
3687 }
3688 break;
3689 }
3690 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3691 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3692 proc->pid, thread->pid, (u64)data_ptr,
3693 buffer->debug_id,
3694 buffer->transaction ? "active" : "finished");
3695
3696 if (buffer->transaction) {
3697 buffer->transaction->buffer = NULL;
3698 buffer->transaction = NULL;
3699 }
3700 if (buffer->async_transaction && buffer->target_node) {
3701 struct binder_node *buf_node;
3702 struct binder_work *w;
3703
3704 buf_node = buffer->target_node;
3705 binder_node_inner_lock(buf_node);
3706 BUG_ON(!buf_node->has_async_transaction);
3707 BUG_ON(buf_node->proc != proc);
3708 w = binder_dequeue_work_head_ilocked(
3709 &buf_node->async_todo);
3710 if (!w) {
3711 buf_node->has_async_transaction = 0;
3712 } else {
3713 binder_enqueue_work_ilocked(
3714 w, &proc->todo);
3715 binder_wakeup_proc_ilocked(proc);
3716 }
3717 binder_node_inner_unlock(buf_node);
3718 }
3719 trace_binder_transaction_buffer_release(buffer);
3720 binder_transaction_buffer_release(proc, buffer, NULL);
3721 binder_alloc_free_buf(&proc->alloc, buffer);
3722 break;
3723 }
3724
3725 case BC_TRANSACTION_SG:
3726 case BC_REPLY_SG: {
3727 struct binder_transaction_data_sg tr;
3728
3729 if (copy_from_user(&tr, ptr, sizeof(tr)))
3730 return -EFAULT;
3731 ptr += sizeof(tr);
3732 binder_transaction(proc, thread, &tr.transaction_data,
3733 cmd == BC_REPLY_SG, tr.buffers_size);
3734 break;
3735 }
3736 case BC_TRANSACTION:
3737 case BC_REPLY: {
3738 struct binder_transaction_data tr;
3739
3740 if (copy_from_user(&tr, ptr, sizeof(tr)))
3741 return -EFAULT;
3742 ptr += sizeof(tr);
3743 binder_transaction(proc, thread, &tr,
3744 cmd == BC_REPLY, 0);
3745 break;
3746 }
3747
3748 case BC_REGISTER_LOOPER:
3749 binder_debug(BINDER_DEBUG_THREADS,
3750 "%d:%d BC_REGISTER_LOOPER\n",
3751 proc->pid, thread->pid);
3752 binder_inner_proc_lock(proc);
3753 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3754 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3755 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3756 proc->pid, thread->pid);
3757 } else if (proc->requested_threads == 0) {
3758 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3759 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3760 proc->pid, thread->pid);
3761 } else {
3762 proc->requested_threads--;
3763 proc->requested_threads_started++;
3764 }
3765 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3766 binder_inner_proc_unlock(proc);
3767 break;
3768 case BC_ENTER_LOOPER:
3769 binder_debug(BINDER_DEBUG_THREADS,
3770 "%d:%d BC_ENTER_LOOPER\n",
3771 proc->pid, thread->pid);
3772 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3773 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3774 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3775 proc->pid, thread->pid);
3776 }
3777 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3778 break;
3779 case BC_EXIT_LOOPER:
3780 binder_debug(BINDER_DEBUG_THREADS,
3781 "%d:%d BC_EXIT_LOOPER\n",
3782 proc->pid, thread->pid);
3783 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3784 break;
3785
3786 case BC_REQUEST_DEATH_NOTIFICATION:
3787 case BC_CLEAR_DEATH_NOTIFICATION: {
3788 uint32_t target;
3789 binder_uintptr_t cookie;
3790 struct binder_ref *ref;
3791 struct binder_ref_death *death = NULL;
3792
3793 if (get_user(target, (uint32_t __user *)ptr))
3794 return -EFAULT;
3795 ptr += sizeof(uint32_t);
3796 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3797 return -EFAULT;
3798 ptr += sizeof(binder_uintptr_t);
3799 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3800 /*
3801 * Allocate memory for death notification
3802 * before taking lock
3803 */
3804 death = kzalloc(sizeof(*death), GFP_KERNEL);
3805 if (death == NULL) {
3806 WARN_ON(thread->return_error.cmd !=
3807 BR_OK);
3808 thread->return_error.cmd = BR_ERROR;
3809 binder_enqueue_thread_work(
3810 thread,
3811 &thread->return_error.work);
3812 binder_debug(
3813 BINDER_DEBUG_FAILED_TRANSACTION,
3814 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3815 proc->pid, thread->pid);
3816 break;
3817 }
3818 }
3819 binder_proc_lock(proc);
3820 ref = binder_get_ref_olocked(proc, target, false);
3821 if (ref == NULL) {
3822 binder_user_error("%d:%d %s invalid ref %d\n",
3823 proc->pid, thread->pid,
3824 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3825 "BC_REQUEST_DEATH_NOTIFICATION" :
3826 "BC_CLEAR_DEATH_NOTIFICATION",
3827 target);
3828 binder_proc_unlock(proc);
3829 kfree(death);
3830 break;
3831 }
3832
3833 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3834 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3835 proc->pid, thread->pid,
3836 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3837 "BC_REQUEST_DEATH_NOTIFICATION" :
3838 "BC_CLEAR_DEATH_NOTIFICATION",
3839 (u64)cookie, ref->data.debug_id,
3840 ref->data.desc, ref->data.strong,
3841 ref->data.weak, ref->node->debug_id);
3842
3843 binder_node_lock(ref->node);
3844 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3845 if (ref->death) {
3846 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3847 proc->pid, thread->pid);
3848 binder_node_unlock(ref->node);
3849 binder_proc_unlock(proc);
3850 kfree(death);
3851 break;
3852 }
3853 binder_stats_created(BINDER_STAT_DEATH);
3854 INIT_LIST_HEAD(&death->work.entry);
3855 death->cookie = cookie;
3856 ref->death = death;
3857 if (ref->node->proc == NULL) {
3858 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3859
3860 binder_inner_proc_lock(proc);
3861 binder_enqueue_work_ilocked(
3862 &ref->death->work, &proc->todo);
3863 binder_wakeup_proc_ilocked(proc);
3864 binder_inner_proc_unlock(proc);
3865 }
3866 } else {
3867 if (ref->death == NULL) {
3868 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3869 proc->pid, thread->pid);
3870 binder_node_unlock(ref->node);
3871 binder_proc_unlock(proc);
3872 break;
3873 }
3874 death = ref->death;
3875 if (death->cookie != cookie) {
3876 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3877 proc->pid, thread->pid,
3878 (u64)death->cookie,
3879 (u64)cookie);
3880 binder_node_unlock(ref->node);
3881 binder_proc_unlock(proc);
3882 break;
3883 }
3884 ref->death = NULL;
3885 binder_inner_proc_lock(proc);
3886 if (list_empty(&death->work.entry)) {
3887 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3888 if (thread->looper &
3889 (BINDER_LOOPER_STATE_REGISTERED |
3890 BINDER_LOOPER_STATE_ENTERED))
3891 binder_enqueue_thread_work_ilocked(
3892 thread,
3893 &death->work);
3894 else {
3895 binder_enqueue_work_ilocked(
3896 &death->work,
3897 &proc->todo);
3898 binder_wakeup_proc_ilocked(
3899 proc);
3900 }
3901 } else {
3902 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3903 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3904 }
3905 binder_inner_proc_unlock(proc);
3906 }
3907 binder_node_unlock(ref->node);
3908 binder_proc_unlock(proc);
3909 } break;
3910 case BC_DEAD_BINDER_DONE: {
3911 struct binder_work *w;
3912 binder_uintptr_t cookie;
3913 struct binder_ref_death *death = NULL;
3914
3915 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3916 return -EFAULT;
3917
3918 ptr += sizeof(cookie);
3919 binder_inner_proc_lock(proc);
3920 list_for_each_entry(w, &proc->delivered_death,
3921 entry) {
3922 struct binder_ref_death *tmp_death =
3923 container_of(w,
3924 struct binder_ref_death,
3925 work);
3926
3927 if (tmp_death->cookie == cookie) {
3928 death = tmp_death;
3929 break;
3930 }
3931 }
3932 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3933 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
3934 proc->pid, thread->pid, (u64)cookie,
3935 death);
3936 if (death == NULL) {
3937 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3938 proc->pid, thread->pid, (u64)cookie);
3939 binder_inner_proc_unlock(proc);
3940 break;
3941 }
3942 binder_dequeue_work_ilocked(&death->work);
3943 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3944 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3945 if (thread->looper &
3946 (BINDER_LOOPER_STATE_REGISTERED |
3947 BINDER_LOOPER_STATE_ENTERED))
3948 binder_enqueue_thread_work_ilocked(
3949 thread, &death->work);
3950 else {
3951 binder_enqueue_work_ilocked(
3952 &death->work,
3953 &proc->todo);
3954 binder_wakeup_proc_ilocked(proc);
3955 }
3956 }
3957 binder_inner_proc_unlock(proc);
3958 } break;
3959
3960 default:
3961 pr_err("%d:%d unknown command %d\n",
3962 proc->pid, thread->pid, cmd);
3963 return -EINVAL;
3964 }
3965 *consumed = ptr - buffer;
3966 }
3967 return 0;
3968 }
3969
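/*
 * binder_stat_br() - account a BR_* return command in the global,
 * per-process and per-thread statistics and emit the return tracepoint.
 */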
3970 static void binder_stat_br(struct binder_proc *proc,
3971 struct binder_thread *thread, uint32_t cmd)
3972 {
3973 trace_binder_return(cmd);
3974 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3975 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3976 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3977 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3978 }
3979 }
3980
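/*
 * binder_put_node_cmd() - write a node command triple (cmd, node pointer,
 * node cookie) to the user read buffer at *ptrp, advancing *ptrp on
 * success.  Returns 0, or -EFAULT if the copy to user space fails.
 */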
3981 static int binder_put_node_cmd(struct binder_proc *proc,
3982 struct binder_thread *thread,
3983 void __user **ptrp,
3984 binder_uintptr_t node_ptr,
3985 binder_uintptr_t node_cookie,
3986 int node_debug_id,
3987 uint32_t cmd, const char *cmd_name)
3988 {
3989 void __user *ptr = *ptrp;
3990
3991 if (put_user(cmd, (uint32_t __user *)ptr))
3992 return -EFAULT;
3993 ptr += sizeof(uint32_t);
3994
3995 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3996 return -EFAULT;
3997 ptr += sizeof(binder_uintptr_t);
3998
3999 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4000 return -EFAULT;
4001 ptr += sizeof(binder_uintptr_t);
4002
4003 binder_stat_br(proc, thread, cmd);
4004 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4005 proc->pid, thread->pid, cmd_name, node_debug_id,
4006 (u64)node_ptr, (u64)node_cookie);
4007
4008 *ptrp = ptr;
4009 return 0;
4010 }
4011
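/*
 * binder_wait_for_work() - block until the thread has work or, when
 * do_proc_work is set, until process work is available.  Sleeps
 * interruptibly on thread->wait (registering on proc->waiting_threads when
 * process work may be handled) and returns 0 once work is pending or
 * -ERESTARTSYS if a signal is pending.
 */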
4012 static int binder_wait_for_work(struct binder_thread *thread,
4013 bool do_proc_work)
4014 {
4015 DEFINE_WAIT(wait);
4016 struct binder_proc *proc = thread->proc;
4017 int ret = 0;
4018
4019 freezer_do_not_count();
4020 binder_inner_proc_lock(proc);
4021 for (;;) {
4022 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4023 if (binder_has_work_ilocked(thread, do_proc_work))
4024 break;
4025 if (do_proc_work)
4026 list_add(&thread->waiting_thread_node,
4027 &proc->waiting_threads);
4028 binder_inner_proc_unlock(proc);
4029 schedule();
4030 binder_inner_proc_lock(proc);
4031 list_del_init(&thread->waiting_thread_node);
4032 if (signal_pending(current)) {
4033 ret = -ERESTARTSYS;
4034 break;
4035 }
4036 }
4037 finish_wait(&thread->wait, &wait);
4038 binder_inner_proc_unlock(proc);
4039 freezer_count();
4040
4041 return ret;
4042 }
4043
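/*
 * binder_thread_read() - fill the user read buffer with BR_* commands.
 * Waits for work unless non_block is set, then drains the thread and
 * process todo lists, translating work items into return commands and
 * copying transaction data to user space.  May append BR_SPAWN_LOOPER to
 * ask user space to start another looper thread.
 */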
4044 static int binder_thread_read(struct binder_proc *proc,
4045 struct binder_thread *thread,
4046 binder_uintptr_t binder_buffer, size_t size,
4047 binder_size_t *consumed, int non_block)
4048 {
4049 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4050 void __user *ptr = buffer + *consumed;
4051 void __user *end = buffer + size;
4052
4053 int ret = 0;
4054 int wait_for_proc_work;
4055
4056 if (*consumed == 0) {
4057 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4058 return -EFAULT;
4059 ptr += sizeof(uint32_t);
4060 }
4061
4062 retry:
4063 binder_inner_proc_lock(proc);
4064 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4065 binder_inner_proc_unlock(proc);
4066
4067 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4068
4069 trace_binder_wait_for_work(wait_for_proc_work,
4070 !!thread->transaction_stack,
4071 !binder_worklist_empty(proc, &thread->todo));
4072 if (wait_for_proc_work) {
4073 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4074 BINDER_LOOPER_STATE_ENTERED))) {
4075 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4076 proc->pid, thread->pid, thread->looper);
4077 wait_event_interruptible(binder_user_error_wait,
4078 binder_stop_on_user_error < 2);
4079 }
4080 binder_restore_priority(current, proc->default_priority);
4081 }
4082
4083 if (non_block) {
4084 if (!binder_has_work(thread, wait_for_proc_work))
4085 ret = -EAGAIN;
4086 } else {
4087 ret = binder_wait_for_work(thread, wait_for_proc_work);
4088 }
4089
4090 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4091
4092 if (ret)
4093 return ret;
4094
4095 while (1) {
4096 uint32_t cmd;
4097 struct binder_transaction_data_secctx tr;
4098 struct binder_transaction_data *trd = &tr.transaction_data;
4099
4100 struct binder_work *w = NULL;
4101 struct list_head *list = NULL;
4102 struct binder_transaction *t = NULL;
4103 struct binder_thread *t_from;
4104 size_t trsize = sizeof(*trd);
4105
4106 binder_inner_proc_lock(proc);
4107 if (!binder_worklist_empty_ilocked(&thread->todo))
4108 list = &thread->todo;
4109 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4110 wait_for_proc_work)
4111 list = &proc->todo;
4112 else {
4113 binder_inner_proc_unlock(proc);
4114
4115 /* no data added */
4116 if (ptr - buffer == 4 && !thread->looper_need_return)
4117 goto retry;
4118 break;
4119 }
4120
4121 if (end - ptr < sizeof(tr) + 4) {
4122 binder_inner_proc_unlock(proc);
4123 break;
4124 }
4125 w = binder_dequeue_work_head_ilocked(list);
4126 if (binder_worklist_empty_ilocked(&thread->todo))
4127 thread->process_todo = false;
4128
4129 switch (w->type) {
4130 case BINDER_WORK_TRANSACTION: {
4131 binder_inner_proc_unlock(proc);
4132 t = container_of(w, struct binder_transaction, work);
4133 } break;
4134 case BINDER_WORK_RETURN_ERROR: {
4135 struct binder_error *e = container_of(
4136 w, struct binder_error, work);
4137
4138 WARN_ON(e->cmd == BR_OK);
4139 binder_inner_proc_unlock(proc);
4140 if (put_user(e->cmd, (uint32_t __user *)ptr))
4141 return -EFAULT;
cmd = e->cmd;
4142 e->cmd = BR_OK;
4143 ptr += sizeof(uint32_t);
4144
4145 binder_stat_br(proc, thread, cmd);
4146 } break;
4147 case BINDER_WORK_TRANSACTION_COMPLETE: {
4148 binder_inner_proc_unlock(proc);
4149 cmd = BR_TRANSACTION_COMPLETE;
4150 if (put_user(cmd, (uint32_t __user *)ptr))
4151 return -EFAULT;
4152 ptr += sizeof(uint32_t);
4153
4154 binder_stat_br(proc, thread, cmd);
4155 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4156 "%d:%d BR_TRANSACTION_COMPLETE\n",
4157 proc->pid, thread->pid);
4158 kfree(w);
4159 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4160 } break;
4161 case BINDER_WORK_NODE: {
4162 struct binder_node *node = container_of(w, struct binder_node, work);
4163 int strong, weak;
4164 binder_uintptr_t node_ptr = node->ptr;
4165 binder_uintptr_t node_cookie = node->cookie;
4166 int node_debug_id = node->debug_id;
4167 int has_weak_ref;
4168 int has_strong_ref;
4169 void __user *orig_ptr = ptr;
4170
4171 BUG_ON(proc != node->proc);
4172 strong = node->internal_strong_refs ||
4173 node->local_strong_refs;
4174 weak = !hlist_empty(&node->refs) ||
4175 node->local_weak_refs ||
4176 node->tmp_refs || strong;
4177 has_strong_ref = node->has_strong_ref;
4178 has_weak_ref = node->has_weak_ref;
4179
4180 if (weak && !has_weak_ref) {
4181 node->has_weak_ref = 1;
4182 node->pending_weak_ref = 1;
4183 node->local_weak_refs++;
4184 }
4185 if (strong && !has_strong_ref) {
4186 node->has_strong_ref = 1;
4187 node->pending_strong_ref = 1;
4188 node->local_strong_refs++;
4189 }
4190 if (!strong && has_strong_ref)
4191 node->has_strong_ref = 0;
4192 if (!weak && has_weak_ref)
4193 node->has_weak_ref = 0;
4194 if (!weak && !strong) {
4195 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4196 "%d:%d node %d u%016llx c%016llx deleted\n",
4197 proc->pid, thread->pid,
4198 node_debug_id,
4199 (u64)node_ptr,
4200 (u64)node_cookie);
4201 rb_erase(&node->rb_node, &proc->nodes);
4202 binder_inner_proc_unlock(proc);
4203 binder_node_lock(node);
4204 /*
4205 * Acquire the node lock before freeing the
4206 * node to serialize with other threads that
4207 * may have been holding the node lock while
4208 * decrementing this node (avoids race where
4209 * this thread frees while the other thread
4210 * is unlocking the node after the final
4211 * decrement)
4212 */
4213 binder_node_unlock(node);
4214 binder_free_node(node);
4215 } else
4216 binder_inner_proc_unlock(proc);
4217
4218 if (weak && !has_weak_ref)
4219 ret = binder_put_node_cmd(
4220 proc, thread, &ptr, node_ptr,
4221 node_cookie, node_debug_id,
4222 BR_INCREFS, "BR_INCREFS");
4223 if (!ret && strong && !has_strong_ref)
4224 ret = binder_put_node_cmd(
4225 proc, thread, &ptr, node_ptr,
4226 node_cookie, node_debug_id,
4227 BR_ACQUIRE, "BR_ACQUIRE");
4228 if (!ret && !strong && has_strong_ref)
4229 ret = binder_put_node_cmd(
4230 proc, thread, &ptr, node_ptr,
4231 node_cookie, node_debug_id,
4232 BR_RELEASE, "BR_RELEASE");
4233 if (!ret && !weak && has_weak_ref)
4234 ret = binder_put_node_cmd(
4235 proc, thread, &ptr, node_ptr,
4236 node_cookie, node_debug_id,
4237 BR_DECREFS, "BR_DECREFS");
4238 if (orig_ptr == ptr)
4239 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4240 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4241 proc->pid, thread->pid,
4242 node_debug_id,
4243 (u64)node_ptr,
4244 (u64)node_cookie);
4245 if (ret)
4246 return ret;
4247 } break;
4248 case BINDER_WORK_DEAD_BINDER:
4249 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4250 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4251 struct binder_ref_death *death;
4252 uint32_t cmd;
4253 binder_uintptr_t cookie;
4254
4255 death = container_of(w, struct binder_ref_death, work);
4256 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4257 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4258 else
4259 cmd = BR_DEAD_BINDER;
4260 cookie = death->cookie;
4261
4262 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4263 "%d:%d %s %016llx\n",
4264 proc->pid, thread->pid,
4265 cmd == BR_DEAD_BINDER ?
4266 "BR_DEAD_BINDER" :
4267 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4268 (u64)cookie);
4269 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4270 binder_inner_proc_unlock(proc);
4271 kfree(death);
4272 binder_stats_deleted(BINDER_STAT_DEATH);
4273 } else {
4274 binder_enqueue_work_ilocked(
4275 w, &proc->delivered_death);
4276 binder_inner_proc_unlock(proc);
4277 }
4278 if (put_user(cmd, (uint32_t __user *)ptr))
4279 return -EFAULT;
4280 ptr += sizeof(uint32_t);
4281 if (put_user(cookie,
4282 (binder_uintptr_t __user *)ptr))
4283 return -EFAULT;
4284 ptr += sizeof(binder_uintptr_t);
4285 binder_stat_br(proc, thread, cmd);
4286 if (cmd == BR_DEAD_BINDER)
4287 goto done; /* DEAD_BINDER notifications can cause transactions */
4288 } break;
4289 }
4290
4291 if (!t)
4292 continue;
4293
4294 BUG_ON(t->buffer == NULL);
4295 if (t->buffer->target_node) {
4296 struct binder_node *target_node = t->buffer->target_node;
4297 struct binder_priority node_prio;
4298
4299 trd->target.ptr = target_node->ptr;
4300 trd->cookie = target_node->cookie;
4301
4302 node_prio.sched_policy = target_node->sched_policy;
4303 node_prio.prio = target_node->min_priority;
4304 binder_transaction_priority(current, t, node_prio,
4305 target_node->inherit_rt);
4306 cmd = BR_TRANSACTION;
4307 } else {
4308 trd->target.ptr = 0;
4309 trd->cookie = 0;
4310 cmd = BR_REPLY;
4311 }
4312 trd->code = t->code;
4313 trd->flags = t->flags;
4314 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4315
4316 t_from = binder_get_txn_from(t);
4317 if (t_from) {
4318 struct task_struct *sender = t_from->proc->tsk;
4319
4320 trd->sender_pid = task_tgid_nr_ns(sender,
4321 task_active_pid_ns(current));
4322 } else {
4323 trd->sender_pid = 0;
4324 }
4325
4326 trd->data_size = t->buffer->data_size;
4327 trd->offsets_size = t->buffer->offsets_size;
4328 trd->data.ptr.buffer = (binder_uintptr_t)
4329 ((uintptr_t)t->buffer->data +
4330 binder_alloc_get_user_buffer_offset(&proc->alloc));
4331 trd->data.ptr.offsets = trd->data.ptr.buffer +
4332 ALIGN(t->buffer->data_size,
4333 sizeof(void *));
4334 tr.secctx = t->security_ctx;
4335 if (t->security_ctx) {
4336 cmd = BR_TRANSACTION_SEC_CTX;
4337 trsize = sizeof(tr);
4338 }
4339
4340
4341 if (put_user(cmd, (uint32_t __user *)ptr)) {
4342 if (t_from)
4343 binder_thread_dec_tmpref(t_from);
4344
4345 binder_cleanup_transaction(t, "put_user failed",
4346 BR_FAILED_REPLY);
4347
4348 return -EFAULT;
4349 }
4350 ptr += sizeof(uint32_t);
4351 if (copy_to_user(ptr, &tr, trsize)) {
4352 if (t_from)
4353 binder_thread_dec_tmpref(t_from);
4354
4355 binder_cleanup_transaction(t, "copy_to_user failed",
4356 BR_FAILED_REPLY);
4357
4358 return -EFAULT;
4359 }
4360 ptr += trsize;
4361
4362 trace_binder_transaction_received(t);
4363 binder_stat_br(proc, thread, cmd);
4364 binder_debug(BINDER_DEBUG_TRANSACTION,
4365 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4366 proc->pid, thread->pid,
4367 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4368 (cmd == BR_TRANSACTION_SEC_CTX) ?
4369 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4370 t->debug_id, t_from ? t_from->proc->pid : 0,
4371 t_from ? t_from->pid : 0, cmd,
4372 t->buffer->data_size, t->buffer->offsets_size,
4373 (u64)trd->data.ptr.buffer,
4374 (u64)trd->data.ptr.offsets);
4375
4376 if (t_from)
4377 binder_thread_dec_tmpref(t_from);
4378 t->buffer->allow_user_free = 1;
4379 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4380 binder_inner_proc_lock(thread->proc);
4381 t->to_parent = thread->transaction_stack;
4382 t->to_thread = thread;
4383 thread->transaction_stack = t;
4384 binder_inner_proc_unlock(thread->proc);
4385 } else {
4386 binder_free_transaction(t);
4387 }
4388 break;
4389 }
4390
4391 done:
4392
4393 *consumed = ptr - buffer;
4394 binder_inner_proc_lock(proc);
4395 if (proc->requested_threads == 0 &&
4396 list_empty(&thread->proc->waiting_threads) &&
4397 proc->requested_threads_started < proc->max_threads &&
4398 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4399 BINDER_LOOPER_STATE_ENTERED))
4400 /* the user-space code fails to spawn a new thread if we leave this out */) {
4401 proc->requested_threads++;
4402 binder_inner_proc_unlock(proc);
4403 binder_debug(BINDER_DEBUG_THREADS,
4404 "%d:%d BR_SPAWN_LOOPER\n",
4405 proc->pid, thread->pid);
4406 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4407 return -EFAULT;
4408 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4409 } else
4410 binder_inner_proc_unlock(proc);
4411 return 0;
4412 }
4413
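/*
 * binder_release_work() - drain a work list that can no longer be
 * delivered (e.g. because the process died), cleaning up transactions,
 * completion entries and death notifications as appropriate.
 */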
4414 static void binder_release_work(struct binder_proc *proc,
4415 struct list_head *list)
4416 {
4417 struct binder_work *w;
4418
4419 while (1) {
4420 w = binder_dequeue_work_head(proc, list);
4421 if (!w)
4422 return;
4423
4424 switch (w->type) {
4425 case BINDER_WORK_TRANSACTION: {
4426 struct binder_transaction *t;
4427
4428 t = container_of(w, struct binder_transaction, work);
4429
4430 binder_cleanup_transaction(t, "process died.",
4431 BR_DEAD_REPLY);
4432 } break;
4433 case BINDER_WORK_RETURN_ERROR: {
4434 struct binder_error *e = container_of(
4435 w, struct binder_error, work);
4436
4437 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4438 "undelivered TRANSACTION_ERROR: %u\n",
4439 e->cmd);
4440 } break;
4441 case BINDER_WORK_TRANSACTION_COMPLETE: {
4442 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4443 "undelivered TRANSACTION_COMPLETE\n");
4444 kfree(w);
4445 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4446 } break;
4447 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4448 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4449 struct binder_ref_death *death;
4450
4451 death = container_of(w, struct binder_ref_death, work);
4452 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4453 "undelivered death notification, %016llx\n",
4454 (u64)death->cookie);
4455 kfree(death);
4456 binder_stats_deleted(BINDER_STAT_DEATH);
4457 } break;
4458 default:
4459 pr_err("unexpected work type, %d, not freed\n",
4460 w->type);
4461 break;
4462 }
4463 }
4464
4465 }
4466
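/*
 * binder_get_thread_ilocked() - look up the binder_thread for the current
 * task in proc->threads; if none exists and new_thread was supplied,
 * initialize and insert it.  Must be called with the inner proc lock held.
 */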
4467 static struct binder_thread *binder_get_thread_ilocked(
4468 struct binder_proc *proc, struct binder_thread *new_thread)
4469 {
4470 struct binder_thread *thread = NULL;
4471 struct rb_node *parent = NULL;
4472 struct rb_node **p = &proc->threads.rb_node;
4473
4474 while (*p) {
4475 parent = *p;
4476 thread = rb_entry(parent, struct binder_thread, rb_node);
4477
4478 if (current->pid < thread->pid)
4479 p = &(*p)->rb_left;
4480 else if (current->pid > thread->pid)
4481 p = &(*p)->rb_right;
4482 else
4483 return thread;
4484 }
4485 if (!new_thread)
4486 return NULL;
4487 thread = new_thread;
4488 binder_stats_created(BINDER_STAT_THREAD);
4489 thread->proc = proc;
4490 thread->pid = current->pid;
4491 get_task_struct(current);
4492 thread->task = current;
4493 atomic_set(&thread->tmp_ref, 0);
4494 init_waitqueue_head(&thread->wait);
4495 INIT_LIST_HEAD(&thread->todo);
4496 rb_link_node(&thread->rb_node, parent, p);
4497 rb_insert_color(&thread->rb_node, &proc->threads);
4498 thread->looper_need_return = true;
4499 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4500 thread->return_error.cmd = BR_OK;
4501 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4502 thread->reply_error.cmd = BR_OK;
4503 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4504 return thread;
4505 }
4506
4507 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4508 {
4509 struct binder_thread *thread;
4510 struct binder_thread *new_thread;
4511
4512 binder_inner_proc_lock(proc);
4513 thread = binder_get_thread_ilocked(proc, NULL);
4514 binder_inner_proc_unlock(proc);
4515 if (!thread) {
4516 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4517 if (new_thread == NULL)
4518 return NULL;
4519 binder_inner_proc_lock(proc);
4520 thread = binder_get_thread_ilocked(proc, new_thread);
4521 binder_inner_proc_unlock(proc);
4522 if (thread != new_thread)
4523 kfree(new_thread);
4524 }
4525 return thread;
4526 }
4527
4528 static void binder_free_proc(struct binder_proc *proc)
4529 {
4530 BUG_ON(!list_empty(&proc->todo));
4531 BUG_ON(!list_empty(&proc->delivered_death));
4532 binder_alloc_deferred_release(&proc->alloc);
4533 put_task_struct(proc->tsk);
4534 binder_stats_deleted(BINDER_STAT_PROC);
4535 kfree(proc);
4536 }
4537
4538 static void binder_free_thread(struct binder_thread *thread)
4539 {
4540 BUG_ON(!list_empty(&thread->todo));
4541 binder_stats_deleted(BINDER_STAT_THREAD);
4542 binder_proc_dec_tmpref(thread->proc);
4543 put_task_struct(thread->task);
4544 kfree(thread);
4545 }
4546
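/*
 * binder_thread_release() - tear down a thread on BINDER_THREAD_EXIT or
 * process release: unlink it from proc->threads, unwind its transaction
 * stack (replying with BR_DEAD_REPLY to an incoming transaction that still
 * expects a reply), release its pending work and drop the temporary
 * reference.  Returns the number of transactions that were still active.
 */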
4547 static int binder_thread_release(struct binder_proc *proc,
4548 struct binder_thread *thread)
4549 {
4550 struct binder_transaction *t;
4551 struct binder_transaction *send_reply = NULL;
4552 int active_transactions = 0;
4553 struct binder_transaction *last_t = NULL;
4554
4555 binder_inner_proc_lock(thread->proc);
4556 /*
4557 * take a ref on the proc so it survives
4558 * after we remove this thread from proc->threads.
4559 * The corresponding dec is when we actually
4560 * free the thread in binder_free_thread()
4561 */
4562 proc->tmp_ref++;
4563 /*
4564 * take a ref on this thread to ensure it
4565 * survives while we are releasing it
4566 */
4567 atomic_inc(&thread->tmp_ref);
4568 rb_erase(&thread->rb_node, &proc->threads);
4569 t = thread->transaction_stack;
4570 if (t) {
4571 spin_lock(&t->lock);
4572 if (t->to_thread == thread)
4573 send_reply = t;
4574 }
4575 thread->is_dead = true;
4576
4577 while (t) {
4578 last_t = t;
4579 active_transactions++;
4580 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4581 "release %d:%d transaction %d %s, still active\n",
4582 proc->pid, thread->pid,
4583 t->debug_id,
4584 (t->to_thread == thread) ? "in" : "out");
4585
4586 if (t->to_thread == thread) {
4587 t->to_proc = NULL;
4588 t->to_thread = NULL;
4589 if (t->buffer) {
4590 t->buffer->transaction = NULL;
4591 t->buffer = NULL;
4592 }
4593 t = t->to_parent;
4594 } else if (t->from == thread) {
4595 t->from = NULL;
4596 t = t->from_parent;
4597 } else
4598 BUG();
4599 spin_unlock(&last_t->lock);
4600 if (t)
4601 spin_lock(&t->lock);
4602 }
4603 binder_inner_proc_unlock(thread->proc);
4604
4605 if (send_reply)
4606 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4607 binder_release_work(proc, &thread->todo);
4608 binder_thread_dec_tmpref(thread);
4609 return active_transactions;
4610 }
4611
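/*
 * binder_poll() - poll support for the binder fd; reports POLLIN when the
 * calling thread has work pending (or process work it may handle).
 */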
4612 static unsigned int binder_poll(struct file *filp,
4613 struct poll_table_struct *wait)
4614 {
4615 struct binder_proc *proc = filp->private_data;
4616 struct binder_thread *thread = NULL;
4617 bool wait_for_proc_work;
4618
4619 thread = binder_get_thread(proc);
if (!thread)
return POLLERR;
4620
4621 binder_inner_proc_lock(thread->proc);
4622 thread->looper |= BINDER_LOOPER_STATE_POLL;
4623 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4624
4625 binder_inner_proc_unlock(thread->proc);
4626
4627 poll_wait(filp, &thread->wait, wait);
4628
4629 if (binder_has_work(thread, wait_for_proc_work))
4630 return POLLIN;
4631
4632 return 0;
4633 }
4634
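/*
 * binder_ioctl_write_read() - handle BINDER_WRITE_READ: copy in the
 * binder_write_read block, process the write buffer, fill the read buffer,
 * and copy the updated consumed counts back to user space.
 */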
4635 static int binder_ioctl_write_read(struct file *filp,
4636 unsigned int cmd, unsigned long arg,
4637 struct binder_thread *thread)
4638 {
4639 int ret = 0;
4640 struct binder_proc *proc = filp->private_data;
4641 unsigned int size = _IOC_SIZE(cmd);
4642 void __user *ubuf = (void __user *)arg;
4643 struct binder_write_read bwr;
4644
4645 if (size != sizeof(struct binder_write_read)) {
4646 ret = -EINVAL;
4647 goto out;
4648 }
4649 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4650 ret = -EFAULT;
4651 goto out;
4652 }
4653 binder_debug(BINDER_DEBUG_READ_WRITE,
4654 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4655 proc->pid, thread->pid,
4656 (u64)bwr.write_size, (u64)bwr.write_buffer,
4657 (u64)bwr.read_size, (u64)bwr.read_buffer);
4658
4659 if (bwr.write_size > 0) {
4660 ret = binder_thread_write(proc, thread,
4661 bwr.write_buffer,
4662 bwr.write_size,
4663 &bwr.write_consumed);
4664 trace_binder_write_done(ret);
4665 if (ret < 0) {
4666 bwr.read_consumed = 0;
4667 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4668 ret = -EFAULT;
4669 goto out;
4670 }
4671 }
4672 if (bwr.read_size > 0) {
4673 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4674 bwr.read_size,
4675 &bwr.read_consumed,
4676 filp->f_flags & O_NONBLOCK);
4677 trace_binder_read_done(ret);
4678 binder_inner_proc_lock(proc);
4679 if (!binder_worklist_empty_ilocked(&proc->todo))
4680 binder_wakeup_proc_ilocked(proc);
4681 binder_inner_proc_unlock(proc);
4682 if (ret < 0) {
4683 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4684 ret = -EFAULT;
4685 goto out;
4686 }
4687 }
4688 binder_debug(BINDER_DEBUG_READ_WRITE,
4689 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4690 proc->pid, thread->pid,
4691 (u64)bwr.write_consumed, (u64)bwr.write_size,
4692 (u64)bwr.read_consumed, (u64)bwr.read_size);
4693 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4694 ret = -EFAULT;
4695 goto out;
4696 }
4697 out:
4698 return ret;
4699 }
4700
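/*
 * binder_ioctl_set_ctx_mgr() - install the calling process as context
 * manager for this binder device.  After the security and euid checks, a
 * new node is created (from the optional flat_binder_object) and recorded
 * as the context manager node.
 */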
4701 static int binder_ioctl_set_ctx_mgr(struct file *filp,
4702 struct flat_binder_object *fbo)
4703 {
4704 int ret = 0;
4705 struct binder_proc *proc = filp->private_data;
4706 struct binder_context *context = proc->context;
4707 struct binder_node *new_node;
4708 kuid_t curr_euid = current_euid();
4709
4710 mutex_lock(&context->context_mgr_node_lock);
4711 if (context->binder_context_mgr_node) {
4712 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4713 ret = -EBUSY;
4714 goto out;
4715 }
4716 ret = security_binder_set_context_mgr(proc->tsk);
4717 if (ret < 0)
4718 goto out;
4719 if (uid_valid(context->binder_context_mgr_uid)) {
4720 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4721 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4722 from_kuid(&init_user_ns, curr_euid),
4723 from_kuid(&init_user_ns,
4724 context->binder_context_mgr_uid));
4725 ret = -EPERM;
4726 goto out;
4727 }
4728 } else {
4729 context->binder_context_mgr_uid = curr_euid;
4730 }
4731 new_node = binder_new_node(proc, fbo);
4732 if (!new_node) {
4733 ret = -ENOMEM;
4734 goto out;
4735 }
4736 binder_node_lock(new_node);
4737 new_node->local_weak_refs++;
4738 new_node->local_strong_refs++;
4739 new_node->has_strong_ref = 1;
4740 new_node->has_weak_ref = 1;
4741 context->binder_context_mgr_node = new_node;
4742 binder_node_unlock(new_node);
4743 binder_put_node(new_node);
4744 out:
4745 mutex_unlock(&context->context_mgr_node_lock);
4746 return ret;
4747 }
4748
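/*
 * binder_ioctl_get_node_debug_info() - report the first node of this proc
 * whose ptr is greater than info->ptr, allowing user space to iterate over
 * the node tree one entry at a time.
 */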
4749 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4750 struct binder_node_debug_info *info)
4751 {
4752 struct rb_node *n;
4753 binder_uintptr_t ptr = info->ptr;
4754
4755 memset(info, 0, sizeof(*info));
4756
4757 binder_inner_proc_lock(proc);
4758 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4759 struct binder_node *node = rb_entry(n, struct binder_node,
4760 rb_node);
4761 if (node->ptr > ptr) {
4762 info->ptr = node->ptr;
4763 info->cookie = node->cookie;
4764 info->has_strong_ref = node->has_strong_ref;
4765 info->has_weak_ref = node->has_weak_ref;
4766 break;
4767 }
4768 }
4769 binder_inner_proc_unlock(proc);
4770
4771 return 0;
4772 }
4773
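/*
 * binder_ioctl() - top-level ioctl handler: looks up the calling thread,
 * then dispatches BINDER_WRITE_READ, max-thread and context-manager
 * configuration, thread exit, version and node debug info requests.
 */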
4774 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4775 {
4776 int ret;
4777 struct binder_proc *proc = filp->private_data;
4778 struct binder_thread *thread;
4779 unsigned int size = _IOC_SIZE(cmd);
4780 void __user *ubuf = (void __user *)arg;
4781
4782 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4783 proc->pid, current->pid, cmd, arg);*/
4784
4785 binder_selftest_alloc(&proc->alloc);
4786
4787 trace_binder_ioctl(cmd, arg);
4788
4789 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4790 if (ret)
4791 goto err_unlocked;
4792
4793 thread = binder_get_thread(proc);
4794 if (thread == NULL) {
4795 ret = -ENOMEM;
4796 goto err;
4797 }
4798
4799 switch (cmd) {
4800 case BINDER_WRITE_READ:
4801 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4802 if (ret)
4803 goto err;
4804 break;
4805 case BINDER_SET_MAX_THREADS: {
4806 int max_threads;
4807
4808 if (copy_from_user(&max_threads, ubuf,
4809 sizeof(max_threads))) {
4810 ret = -EINVAL;
4811 goto err;
4812 }
4813 binder_inner_proc_lock(proc);
4814 proc->max_threads = max_threads;
4815 binder_inner_proc_unlock(proc);
4816 break;
4817 }
4818 case BINDER_SET_CONTEXT_MGR_EXT: {
4819 struct flat_binder_object fbo;
4820
4821 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
4822 ret = -EINVAL;
4823 goto err;
4824 }
4825 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
4826 if (ret)
4827 goto err;
4828 break;
4829 }
4830
4831 case BINDER_SET_CONTEXT_MGR:
4832 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
4833 if (ret)
4834 goto err;
4835 break;
4836 case BINDER_THREAD_EXIT:
4837 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
4838 proc->pid, thread->pid);
4839 binder_thread_release(proc, thread);
4840 thread = NULL;
4841 break;
4842 case BINDER_VERSION: {
4843 struct binder_version __user *ver = ubuf;
4844
4845 if (size != sizeof(struct binder_version)) {
4846 ret = -EINVAL;
4847 goto err;
4848 }
4849 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4850 &ver->protocol_version)) {
4851 ret = -EINVAL;
4852 goto err;
4853 }
4854 break;
4855 }
4856 case BINDER_GET_NODE_DEBUG_INFO: {
4857 struct binder_node_debug_info info;
4858
4859 if (copy_from_user(&info, ubuf, sizeof(info))) {
4860 ret = -EFAULT;
4861 goto err;
4862 }
4863
4864 ret = binder_ioctl_get_node_debug_info(proc, &info);
4865 if (ret < 0)
4866 goto err;
4867
4868 if (copy_to_user(ubuf, &info, sizeof(info))) {
4869 ret = -EFAULT;
4870 goto err;
4871 }
4872 break;
4873 }
4874 default:
4875 ret = -EINVAL;
4876 goto err;
4877 }
4878 ret = 0;
4879 err:
4880 if (thread)
4881 thread->looper_need_return = false;
4882 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4883 if (ret && ret != -ERESTARTSYS)
4884 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
4885 err_unlocked:
4886 trace_binder_ioctl_done(ret);
4887 return ret;
4888 }
4889
4890 static void binder_vma_open(struct vm_area_struct *vma)
4891 {
4892 struct binder_proc *proc = vma->vm_private_data;
4893
4894 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4895 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4896 proc->pid, vma->vm_start, vma->vm_end,
4897 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4898 (unsigned long)pgprot_val(vma->vm_page_prot));
4899 }
4900
4901 static void binder_vma_close(struct vm_area_struct *vma)
4902 {
4903 struct binder_proc *proc = vma->vm_private_data;
4904
4905 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4906 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
4907 proc->pid, vma->vm_start, vma->vm_end,
4908 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4909 (unsigned long)pgprot_val(vma->vm_page_prot));
4910 binder_alloc_vma_close(&proc->alloc);
4911 }
4912
4913 static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4914 {
4915 return VM_FAULT_SIGBUS;
4916 }
4917
4918 static const struct vm_operations_struct binder_vm_ops = {
4919 .open = binder_vma_open,
4920 .close = binder_vma_close,
4921 .fault = binder_vm_fault,
4922 };
4923
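/*
 * binder_mmap() - set up the binder buffer mapping for the process.  The
 * area is capped at 4MB, forbidden vm_flags are rejected, VM_DONTCOPY is
 * set and VM_MAYWRITE cleared before the allocator takes over the vma.
 */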
4924 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4925 {
4926 int ret;
4927 struct binder_proc *proc = filp->private_data;
4928 const char *failure_string;
4929
4930 if (proc->tsk != current->group_leader)
4931 return -EINVAL;
4932
4933 if ((vma->vm_end - vma->vm_start) > SZ_4M)
4934 vma->vm_end = vma->vm_start + SZ_4M;
4935
4936 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4937 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4938 __func__, proc->pid, vma->vm_start, vma->vm_end,
4939 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4940 (unsigned long)pgprot_val(vma->vm_page_prot));
4941
4942 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4943 ret = -EPERM;
4944 failure_string = "bad vm_flags";
4945 goto err_bad_arg;
4946 }
4947 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
4948 vma->vm_ops = &binder_vm_ops;
4949 vma->vm_private_data = proc;
4950
4951 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
4952
4953 return ret;
4954
4955 err_bad_arg:
4956 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
4957 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
4958 return ret;
4959 }
4960
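/*
 * binder_open() - per-process open of a binder device: allocate and
 * initialize a binder_proc, attach it to the device context, add it to the
 * global proc list and create its debugfs entry.
 */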
4961 static int binder_open(struct inode *nodp, struct file *filp)
4962 {
4963 struct binder_proc *proc;
4964 struct binder_device *binder_dev;
4965
4966 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
4967 current->group_leader->pid, current->pid);
4968
4969 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
4970 if (proc == NULL)
4971 return -ENOMEM;
4972 spin_lock_init(&proc->inner_lock);
4973 spin_lock_init(&proc->outer_lock);
4974 get_task_struct(current->group_leader);
4975 proc->tsk = current->group_leader;
4976 INIT_LIST_HEAD(&proc->todo);
4977 if (binder_supported_policy(current->policy)) {
4978 proc->default_priority.sched_policy = current->policy;
4979 proc->default_priority.prio = current->normal_prio;
4980 } else {
4981 proc->default_priority.sched_policy = SCHED_NORMAL;
4982 proc->default_priority.prio = NICE_TO_PRIO(0);
4983 }
4984
4985 binder_dev = container_of(filp->private_data, struct binder_device,
4986 miscdev);
4987 proc->context = &binder_dev->context;
4988 binder_alloc_init(&proc->alloc);
4989
4990 binder_stats_created(BINDER_STAT_PROC);
4991 proc->pid = current->group_leader->pid;
4992 INIT_LIST_HEAD(&proc->delivered_death);
4993 INIT_LIST_HEAD(&proc->waiting_threads);
4994 filp->private_data = proc;
4995
4996 mutex_lock(&binder_procs_lock);
4997 hlist_add_head(&proc->proc_node, &binder_procs);
4998 mutex_unlock(&binder_procs_lock);
4999
5000 if (binder_debugfs_dir_entry_proc) {
5001 char strbuf[11];
5002
5003 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5004 /*
5005 * proc debug entries are shared between contexts, so
5006 * this will fail if the process tries to open the driver
5007 again with a different context. The printing code will
5008 print all the contexts a given PID has anyway, so this
5009 is not a problem.
5010 */
5011 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
5012 binder_debugfs_dir_entry_proc,
5013 (void *)(unsigned long)proc->pid,
5014 &binder_proc_fops);
5015 }
5016
5017 return 0;
5018 }
5019
5020 static int binder_flush(struct file *filp, fl_owner_t id)
5021 {
5022 struct binder_proc *proc = filp->private_data;
5023
5024 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5025
5026 return 0;
5027 }
5028
5029 static void binder_deferred_flush(struct binder_proc *proc)
5030 {
5031 struct rb_node *n;
5032 int wake_count = 0;
5033
5034 binder_inner_proc_lock(proc);
5035 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5036 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5037
5038 thread->looper_need_return = true;
5039 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5040 wake_up_interruptible(&thread->wait);
5041 wake_count++;
5042 }
5043 }
5044 binder_inner_proc_unlock(proc);
5045
5046 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5047 "binder_flush: %d woke %d threads\n", proc->pid,
5048 wake_count);
5049 }
5050
5051 static int binder_release(struct inode *nodp, struct file *filp)
5052 {
5053 struct binder_proc *proc = filp->private_data;
5054
5055 debugfs_remove(proc->debugfs_entry);
5056 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5057
5058 return 0;
5059 }
5060
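/*
 * binder_node_release() - called during process release for each node the
 * dying proc owns: free the node if nothing references it, otherwise move
 * it to the dead-nodes list and queue BINDER_WORK_DEAD_BINDER for every
 * reference that requested a death notification.  Returns the updated
 * incoming reference count.
 */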
5061 static int binder_node_release(struct binder_node *node, int refs)
5062 {
5063 struct binder_ref *ref;
5064 int death = 0;
5065 struct binder_proc *proc = node->proc;
5066
5067 binder_release_work(proc, &node->async_todo);
5068
5069 binder_node_lock(node);
5070 binder_inner_proc_lock(proc);
5071 binder_dequeue_work_ilocked(&node->work);
5072 /*
5073 * The caller must have taken a temporary ref on the node.
5074 */
5075 BUG_ON(!node->tmp_refs);
5076 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5077 binder_inner_proc_unlock(proc);
5078 binder_node_unlock(node);
5079 binder_free_node(node);
5080
5081 return refs;
5082 }
5083
5084 node->proc = NULL;
5085 node->local_strong_refs = 0;
5086 node->local_weak_refs = 0;
5087 binder_inner_proc_unlock(proc);
5088
5089 spin_lock(&binder_dead_nodes_lock);
5090 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5091 spin_unlock(&binder_dead_nodes_lock);
5092
5093 hlist_for_each_entry(ref, &node->refs, node_entry) {
5094 refs++;
5095 /*
5096 * Need the node lock to synchronize
5097 * with new notification requests and the
5098 * inner lock to synchronize with queued
5099 * death notifications.
5100 */
5101 binder_inner_proc_lock(ref->proc);
5102 if (!ref->death) {
5103 binder_inner_proc_unlock(ref->proc);
5104 continue;
5105 }
5106
5107 death++;
5108
5109 BUG_ON(!list_empty(&ref->death->work.entry));
5110 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5111 binder_enqueue_work_ilocked(&ref->death->work,
5112 &ref->proc->todo);
5113 binder_wakeup_proc_ilocked(ref->proc);
5114 binder_inner_proc_unlock(ref->proc);
5115 }
5116
5117 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5118 "node %d now dead, refs %d, death %d\n",
5119 node->debug_id, refs, death);
5120 binder_node_unlock(node);
5121 binder_put_node(node);
5122
5123 return refs;
5124 }
5125
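/*
 * binder_deferred_release() - deferred teardown of a binder_proc: unlink
 * it from the global lists, release all of its threads, nodes, references
 * and undelivered work, then drop the temporary reference that kept the
 * proc alive during the cleanup.
 */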
5126 static void binder_deferred_release(struct binder_proc *proc)
5127 {
5128 struct binder_context *context = proc->context;
5129 struct rb_node *n;
5130 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5131
5132 mutex_lock(&binder_procs_lock);
5133 hlist_del(&proc->proc_node);
5134 mutex_unlock(&binder_procs_lock);
5135
5136 mutex_lock(&context->context_mgr_node_lock);
5137 if (context->binder_context_mgr_node &&
5138 context->binder_context_mgr_node->proc == proc) {
5139 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5140 "%s: %d context_mgr_node gone\n",
5141 __func__, proc->pid);
5142 context->binder_context_mgr_node = NULL;
5143 }
5144 mutex_unlock(&context->context_mgr_node_lock);
5145 binder_inner_proc_lock(proc);
5146 /*
5147 * Make sure proc stays alive after we
5148 * remove all the threads
5149 */
5150 proc->tmp_ref++;
5151
5152 proc->is_dead = true;
5153 threads = 0;
5154 active_transactions = 0;
5155 while ((n = rb_first(&proc->threads))) {
5156 struct binder_thread *thread;
5157
5158 thread = rb_entry(n, struct binder_thread, rb_node);
5159 binder_inner_proc_unlock(proc);
5160 threads++;
5161 active_transactions += binder_thread_release(proc, thread);
5162 binder_inner_proc_lock(proc);
5163 }
5164
5165 nodes = 0;
5166 incoming_refs = 0;
5167 while ((n = rb_first(&proc->nodes))) {
5168 struct binder_node *node;
5169
5170 node = rb_entry(n, struct binder_node, rb_node);
5171 nodes++;
5172 /*
5173 * take a temporary ref on the node before
5174 * calling binder_node_release() which will either
5175 * kfree() the node or call binder_put_node()
5176 */
5177 binder_inc_node_tmpref_ilocked(node);
5178 rb_erase(&node->rb_node, &proc->nodes);
5179 binder_inner_proc_unlock(proc);
5180 incoming_refs = binder_node_release(node, incoming_refs);
5181 binder_inner_proc_lock(proc);
5182 }
5183 binder_inner_proc_unlock(proc);
5184
5185 outgoing_refs = 0;
5186 binder_proc_lock(proc);
5187 while ((n = rb_first(&proc->refs_by_desc))) {
5188 struct binder_ref *ref;
5189
5190 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5191 outgoing_refs++;
5192 binder_cleanup_ref_olocked(ref);
5193 binder_proc_unlock(proc);
5194 binder_free_ref(ref);
5195 binder_proc_lock(proc);
5196 }
5197 binder_proc_unlock(proc);
5198
5199 binder_release_work(proc, &proc->todo);
5200 binder_release_work(proc, &proc->delivered_death);
5201
5202 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5203 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5204 __func__, proc->pid, threads, nodes, incoming_refs,
5205 outgoing_refs, active_transactions);
5206
5207 binder_proc_dec_tmpref(proc);
5208 }
5209
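/*
 * binder_deferred_func() - workqueue handler that drains the deferred work
 * list, running the flush and/or release steps queued for each proc.
 */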
5210 static void binder_deferred_func(struct work_struct *work)
5211 {
5212 struct binder_proc *proc;
5213 int defer;
5214
5215 do {
5216 mutex_lock(&binder_deferred_lock);
5217 if (!hlist_empty(&binder_deferred_list)) {
5218 proc = hlist_entry(binder_deferred_list.first,
5219 struct binder_proc, deferred_work_node);
5220 hlist_del_init(&proc->deferred_work_node);
5221 defer = proc->deferred_work;
5222 proc->deferred_work = 0;
5223 } else {
5224 proc = NULL;
5225 defer = 0;
5226 }
5227 mutex_unlock(&binder_deferred_lock);
5228
5229 if (defer & BINDER_DEFERRED_FLUSH)
5230 binder_deferred_flush(proc);
5231
5232 if (defer & BINDER_DEFERRED_RELEASE)
5233 binder_deferred_release(proc); /* frees proc */
5234 } while (proc);
5235 }
5236 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5237
5238 static void
5239 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5240 {
5241 mutex_lock(&binder_deferred_lock);
5242 proc->deferred_work |= defer;
5243 if (hlist_unhashed(&proc->deferred_work_node)) {
5244 hlist_add_head(&proc->deferred_work_node,
5245 &binder_deferred_list);
5246 schedule_work(&binder_deferred_work);
5247 }
5248 mutex_unlock(&binder_deferred_lock);
5249 }
5250
5251 static void print_binder_transaction_ilocked(struct seq_file *m,
5252 struct binder_proc *proc,
5253 const char *prefix,
5254 struct binder_transaction *t)
5255 {
5256 struct binder_proc *to_proc;
5257 struct binder_buffer *buffer = t->buffer;
5258
5259 spin_lock(&t->lock);
5260 to_proc = t->to_proc;
5261 seq_printf(m,
5262 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
5263 prefix, t->debug_id, t,
5264 t->from ? t->from->proc->pid : 0,
5265 t->from ? t->from->pid : 0,
5266 to_proc ? to_proc->pid : 0,
5267 t->to_thread ? t->to_thread->pid : 0,
5268 t->code, t->flags, t->priority.sched_policy,
5269 t->priority.prio, t->need_reply);
5270 spin_unlock(&t->lock);
5271
5272 if (proc != to_proc) {
5273 /*
5274 * Can only safely deref buffer if we are holding the
5275 * correct proc inner lock for this node
5276 */
5277 seq_puts(m, "\n");
5278 return;
5279 }
5280
5281 if (buffer == NULL) {
5282 seq_puts(m, " buffer free\n");
5283 return;
5284 }
5285 if (buffer->target_node)
5286 seq_printf(m, " node %d", buffer->target_node->debug_id);
5287 seq_printf(m, " size %zd:%zd data %p\n",
5288 buffer->data_size, buffer->offsets_size,
5289 buffer->data);
5290 }
5291
5292 static void print_binder_work_ilocked(struct seq_file *m,
5293 struct binder_proc *proc,
5294 const char *prefix,
5295 const char *transaction_prefix,
5296 struct binder_work *w)
5297 {
5298 struct binder_node *node;
5299 struct binder_transaction *t;
5300
5301 switch (w->type) {
5302 case BINDER_WORK_TRANSACTION:
5303 t = container_of(w, struct binder_transaction, work);
5304 print_binder_transaction_ilocked(
5305 m, proc, transaction_prefix, t);
5306 break;
5307 case BINDER_WORK_RETURN_ERROR: {
5308 struct binder_error *e = container_of(
5309 w, struct binder_error, work);
5310
5311 seq_printf(m, "%stransaction error: %u\n",
5312 prefix, e->cmd);
5313 } break;
5314 case BINDER_WORK_TRANSACTION_COMPLETE:
5315 seq_printf(m, "%stransaction complete\n", prefix);
5316 break;
5317 case BINDER_WORK_NODE:
5318 node = container_of(w, struct binder_node, work);
5319 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5320 prefix, node->debug_id,
5321 (u64)node->ptr, (u64)node->cookie);
5322 break;
5323 case BINDER_WORK_DEAD_BINDER:
5324 seq_printf(m, "%shas dead binder\n", prefix);
5325 break;
5326 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5327 seq_printf(m, "%shas cleared dead binder\n", prefix);
5328 break;
5329 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5330 seq_printf(m, "%shas cleared death notification\n", prefix);
5331 break;
5332 default:
5333 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5334 break;
5335 }
5336 }
5337
5338 static void print_binder_thread_ilocked(struct seq_file *m,
5339 struct binder_thread *thread,
5340 int print_always)
5341 {
5342 struct binder_transaction *t;
5343 struct binder_work *w;
5344 size_t start_pos = m->count;
5345 size_t header_pos;
5346
5347 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
5348 thread->pid, thread->looper,
5349 thread->looper_need_return,
5350 atomic_read(&thread->tmp_ref));
5351 header_pos = m->count;
5352 t = thread->transaction_stack;
5353 while (t) {
5354 if (t->from == thread) {
5355 print_binder_transaction_ilocked(m, thread->proc,
5356 " outgoing transaction", t);
5357 t = t->from_parent;
5358 } else if (t->to_thread == thread) {
5359 print_binder_transaction_ilocked(m, thread->proc,
5360 " incoming transaction", t);
5361 t = t->to_parent;
5362 } else {
5363 print_binder_transaction_ilocked(m, thread->proc,
5364 " bad transaction", t);
5365 t = NULL;
5366 }
5367 }
5368 list_for_each_entry(w, &thread->todo, entry) {
5369 print_binder_work_ilocked(m, thread->proc, " ",
5370 " pending transaction", w);
5371 }
5372 if (!print_always && m->count == header_pos)
5373 m->count = start_pos;
5374 }
5375
5376 static void print_binder_node_nilocked(struct seq_file *m,
5377 struct binder_node *node)
5378 {
5379 struct binder_ref *ref;
5380 struct binder_work *w;
5381 int count;
5382
5383 count = 0;
5384 hlist_for_each_entry(ref, &node->refs, node_entry)
5385 count++;
5386
5387 seq_printf(m, " node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5388 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5389 node->sched_policy, node->min_priority,
5390 node->has_strong_ref, node->has_weak_ref,
5391 node->local_strong_refs, node->local_weak_refs,
5392 node->internal_strong_refs, count, node->tmp_refs);
5393 if (count) {
5394 seq_puts(m, " proc");
5395 hlist_for_each_entry(ref, &node->refs, node_entry)
5396 seq_printf(m, " %d", ref->proc->pid);
5397 }
5398 seq_puts(m, "\n");
5399 if (node->proc) {
5400 list_for_each_entry(w, &node->async_todo, entry)
5401 print_binder_work_ilocked(m, node->proc, " ",
5402 " pending async transaction", w);
5403 }
5404 }
5405
5406 static void print_binder_ref_olocked(struct seq_file *m,
5407 struct binder_ref *ref)
5408 {
5409 binder_node_lock(ref->node);
5410 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5411 ref->data.debug_id, ref->data.desc,
5412 ref->node->proc ? "" : "dead ",
5413 ref->node->debug_id, ref->data.strong,
5414 ref->data.weak, ref->death);
5415 binder_node_unlock(ref->node);
5416 }
5417
5418 static void print_binder_proc(struct seq_file *m,
5419 struct binder_proc *proc, int print_all)
5420 {
5421 struct binder_work *w;
5422 struct rb_node *n;
5423 size_t start_pos = m->count;
5424 size_t header_pos;
5425 struct binder_node *last_node = NULL;
5426
5427 seq_printf(m, "proc %d\n", proc->pid);
5428 seq_printf(m, "context %s\n", proc->context->name);
5429 header_pos = m->count;
5430
5431 binder_inner_proc_lock(proc);
5432 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5433 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5434 rb_node), print_all);
5435
5436 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5437 struct binder_node *node = rb_entry(n, struct binder_node,
5438 rb_node);
5439 /*
5440 * take a temporary reference on the node so it
5441 * survives and isn't removed from the tree
5442 * while we print it.
5443 */
5444 binder_inc_node_tmpref_ilocked(node);
5445 /* Need to drop inner lock to take node lock */
5446 binder_inner_proc_unlock(proc);
5447 if (last_node)
5448 binder_put_node(last_node);
5449 binder_node_inner_lock(node);
5450 print_binder_node_nilocked(m, node);
5451 binder_node_inner_unlock(node);
5452 last_node = node;
5453 binder_inner_proc_lock(proc);
5454 }
5455 binder_inner_proc_unlock(proc);
5456 if (last_node)
5457 binder_put_node(last_node);
5458
5459 if (print_all) {
5460 binder_proc_lock(proc);
5461 for (n = rb_first(&proc->refs_by_desc);
5462 n != NULL;
5463 n = rb_next(n))
5464 print_binder_ref_olocked(m, rb_entry(n,
5465 struct binder_ref,
5466 rb_node_desc));
5467 binder_proc_unlock(proc);
5468 }
5469 binder_alloc_print_allocated(m, &proc->alloc);
5470 binder_inner_proc_lock(proc);
5471 list_for_each_entry(w, &proc->todo, entry)
5472 print_binder_work_ilocked(m, proc, " ",
5473 " pending transaction", w);
5474 list_for_each_entry(w, &proc->delivered_death, entry) {
5475 seq_puts(m, " has delivered dead binder\n");
5476 break;
5477 }
5478 binder_inner_proc_unlock(proc);
5479 if (!print_all && m->count == header_pos)
5480 m->count = start_pos;
5481 }
5482
5483 #ifdef CONFIG_SAMSUNG_FREECESS
5484 static void binder_in_transaction(struct binder_proc *proc)
5485 {
5486 struct rb_node *n = NULL;
5487 struct binder_thread *thread = NULL;
5488 int uid = -1;
5489 struct task_struct *tsk = NULL;
5490 struct binder_transaction *t = NULL;
5491 bool empty = true;
5492 bool found = false;
5493
5494 	//check each binder thread's todo list and transaction_stack
5495 binder_inner_proc_lock(proc);
5496 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5497 thread = rb_entry(n, struct binder_thread, rb_node);
5498 empty = binder_worklist_empty_ilocked(&thread->todo);
5499 tsk = thread->task;
5500
5501 if (tsk != NULL) {
5502 			//this thread has pending binder work
5503 if (!empty) {
5504 				//report the uid to the framework; report only once
5505 uid = tsk->cred->euid.val;
5506 binder_inner_proc_unlock(proc);
5507 cfb_report(uid, "thread");
5508 return;
5509 }
5510
5511 			//the thread is currently processing a binder call
5512 t = thread->transaction_stack;
5513 if (t) {
5514 spin_lock(&t->lock);
5515 if (t->to_thread == thread) {
5516 					//an incoming transaction is pending on this thread
5517 found = true;
5518 uid = tsk->cred->euid.val;
5519 }
5520 spin_unlock(&t->lock);
5521 				if (found) {
5522 					//report the uid to the framework; report only once
5523 binder_inner_proc_unlock(proc);
5524 cfb_report(uid, "transaction_stack");
5525 return;
5526 }
5527 }
5528 }
5529 }
5530
5531 //check binder proc todo list
5532 empty = binder_worklist_empty_ilocked(&proc->todo);
5533 tsk = proc->tsk;
5534 if (tsk != NULL && !empty) {
5535 		//report the uid to the framework
5536 uid = tsk->cred->euid.val;
5537 binder_inner_proc_unlock(proc);
5538 cfb_report(uid, "proc");
5539 }
5540 else
5541 binder_inner_proc_unlock(proc);
5542 }
5543
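/*
 * Samsung freecess hook: walk every binder_proc whose task euid matches
 * @uid and let binder_in_transaction() report any pending binder work to
 * the freecess framework via cfb_report(). The misspelling in the name is
 * left as-is, since the symbol is presumably declared and used outside
 * this file (see <linux/freecess.h>).
 */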
5544 void binders_in_transcation(int uid)
5545 {
5546 struct binder_proc *itr;
5547
5548 mutex_lock(&binder_procs_lock);
5549 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5550 if (itr != NULL && (itr->tsk->cred->euid.val == uid)) {
5551 binder_in_transaction(itr);
5552 }
5553 }
5554 mutex_unlock(&binder_procs_lock);
5555 }
5556 #endif
5557
5558 static const char * const binder_return_strings[] = {
5559 "BR_ERROR",
5560 "BR_OK",
5561 "BR_TRANSACTION",
5562 "BR_REPLY",
5563 "BR_ACQUIRE_RESULT",
5564 "BR_DEAD_REPLY",
5565 "BR_TRANSACTION_COMPLETE",
5566 "BR_INCREFS",
5567 "BR_ACQUIRE",
5568 "BR_RELEASE",
5569 "BR_DECREFS",
5570 "BR_ATTEMPT_ACQUIRE",
5571 "BR_NOOP",
5572 "BR_SPAWN_LOOPER",
5573 "BR_FINISHED",
5574 "BR_DEAD_BINDER",
5575 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5576 "BR_FAILED_REPLY"
5577 };
5578
5579 static const char * const binder_command_strings[] = {
5580 "BC_TRANSACTION",
5581 "BC_REPLY",
5582 "BC_ACQUIRE_RESULT",
5583 "BC_FREE_BUFFER",
5584 "BC_INCREFS",
5585 "BC_ACQUIRE",
5586 "BC_RELEASE",
5587 "BC_DECREFS",
5588 "BC_INCREFS_DONE",
5589 "BC_ACQUIRE_DONE",
5590 "BC_ATTEMPT_ACQUIRE",
5591 "BC_REGISTER_LOOPER",
5592 "BC_ENTER_LOOPER",
5593 "BC_EXIT_LOOPER",
5594 "BC_REQUEST_DEATH_NOTIFICATION",
5595 "BC_CLEAR_DEATH_NOTIFICATION",
5596 "BC_DEAD_BINDER_DONE",
5597 "BC_TRANSACTION_SG",
5598 "BC_REPLY_SG",
5599 };
5600
5601 static const char * const binder_objstat_strings[] = {
5602 "proc",
5603 "thread",
5604 "node",
5605 "ref",
5606 "death",
5607 "transaction",
5608 "transaction_complete"
5609 };
5610
5611 static void print_binder_stats(struct seq_file *m, const char *prefix,
5612 struct binder_stats *stats)
5613 {
5614 int i;
5615
5616 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5617 ARRAY_SIZE(binder_command_strings));
5618 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5619 int temp = atomic_read(&stats->bc[i]);
5620
5621 if (temp)
5622 seq_printf(m, "%s%s: %d\n", prefix,
5623 binder_command_strings[i], temp);
5624 }
5625
5626 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5627 ARRAY_SIZE(binder_return_strings));
5628 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5629 int temp = atomic_read(&stats->br[i]);
5630
5631 if (temp)
5632 seq_printf(m, "%s%s: %d\n", prefix,
5633 binder_return_strings[i], temp);
5634 }
5635
5636 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5637 ARRAY_SIZE(binder_objstat_strings));
5638 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5639 ARRAY_SIZE(stats->obj_deleted));
5640 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5641 int created = atomic_read(&stats->obj_created[i]);
5642 int deleted = atomic_read(&stats->obj_deleted[i]);
5643
5644 if (created || deleted)
5645 seq_printf(m, "%s%s: active %d total %d\n",
5646 prefix,
5647 binder_objstat_strings[i],
5648 created - deleted,
5649 created);
5650 }
5651 }
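/*
 * Illustrative sample of the stats output above, using an empty prefix as
 * for the global stats (counts made up):
 *
 *   BC_TRANSACTION: 27
 *   BC_REPLY: 25
 *   BR_TRANSACTION_COMPLETE: 52
 *   node: active 12 total 40
 *   transaction: active 1 total 52
 */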
5652
5653 static void print_binder_proc_stats(struct seq_file *m,
5654 struct binder_proc *proc)
5655 {
5656 struct binder_work *w;
5657 struct binder_thread *thread;
5658 struct rb_node *n;
5659 int count, strong, weak, ready_threads;
5660 size_t free_async_space =
5661 binder_alloc_get_free_async_space(&proc->alloc);
5662
5663 seq_printf(m, "proc %d\n", proc->pid);
5664 seq_printf(m, "context %s\n", proc->context->name);
5665 count = 0;
5666 ready_threads = 0;
5667 binder_inner_proc_lock(proc);
5668 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5669 count++;
5670
5671 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5672 ready_threads++;
5673
5674 seq_printf(m, " threads: %d\n", count);
5675 seq_printf(m, " requested threads: %d+%d/%d\n"
5676 " ready threads %d\n"
5677 " free async space %zd\n", proc->requested_threads,
5678 proc->requested_threads_started, proc->max_threads,
5679 ready_threads,
5680 free_async_space);
5681 count = 0;
5682 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5683 count++;
5684 binder_inner_proc_unlock(proc);
5685 seq_printf(m, " nodes: %d\n", count);
5686 count = 0;
5687 strong = 0;
5688 weak = 0;
5689 binder_proc_lock(proc);
5690 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5691 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5692 rb_node_desc);
5693 count++;
5694 strong += ref->data.strong;
5695 weak += ref->data.weak;
5696 }
5697 binder_proc_unlock(proc);
5698 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
5699
5700 count = binder_alloc_get_allocated_count(&proc->alloc);
5701 seq_printf(m, " buffers: %d\n", count);
5702
5703 binder_alloc_print_pages(m, &proc->alloc);
5704
5705 count = 0;
5706 binder_inner_proc_lock(proc);
5707 list_for_each_entry(w, &proc->todo, entry) {
5708 if (w->type == BINDER_WORK_TRANSACTION)
5709 count++;
5710 }
5711 binder_inner_proc_unlock(proc);
5712 seq_printf(m, " pending transactions: %d\n", count);
5713
5714 print_binder_stats(m, " ", &proc->stats);
5715 }
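/*
 * Illustrative per-process stats block emitted above (numbers made up,
 * binder_alloc page statistics omitted):
 *
 *   proc 988
 *   context binder
 *     threads: 5
 *     requested threads: 0+3/15
 *     ready threads 2
 *     free async space 520192
 *     nodes: 12
 *     refs: 34 s 30 w 34
 *     buffers: 4
 *     pending transactions: 0
 */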
5716
5717
5718 static int binder_state_show(struct seq_file *m, void *unused)
5719 {
5720 struct binder_proc *proc;
5721 struct binder_node *node;
5722 struct binder_node *last_node = NULL;
5723
5724 seq_puts(m, "binder state:\n");
5725
5726 spin_lock(&binder_dead_nodes_lock);
5727 if (!hlist_empty(&binder_dead_nodes))
5728 seq_puts(m, "dead nodes:\n");
5729 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5730 /*
5731 * take a temporary reference on the node so it
5732 * survives and isn't removed from the list
5733 * while we print it.
5734 */
5735 node->tmp_refs++;
5736 spin_unlock(&binder_dead_nodes_lock);
5737 if (last_node)
5738 binder_put_node(last_node);
5739 binder_node_lock(node);
5740 print_binder_node_nilocked(m, node);
5741 binder_node_unlock(node);
5742 last_node = node;
5743 spin_lock(&binder_dead_nodes_lock);
5744 }
5745 spin_unlock(&binder_dead_nodes_lock);
5746 if (last_node)
5747 binder_put_node(last_node);
5748
5749 mutex_lock(&binder_procs_lock);
5750 hlist_for_each_entry(proc, &binder_procs, proc_node)
5751 print_binder_proc(m, proc, 1);
5752 mutex_unlock(&binder_procs_lock);
5753
5754 return 0;
5755 }
5756
5757 static int binder_stats_show(struct seq_file *m, void *unused)
5758 {
5759 struct binder_proc *proc;
5760
5761 seq_puts(m, "binder stats:\n");
5762
5763 print_binder_stats(m, "", &binder_stats);
5764
5765 mutex_lock(&binder_procs_lock);
5766 hlist_for_each_entry(proc, &binder_procs, proc_node)
5767 print_binder_proc_stats(m, proc);
5768 mutex_unlock(&binder_procs_lock);
5769
5770 return 0;
5771 }
5772
5773 static int binder_transactions_show(struct seq_file *m, void *unused)
5774 {
5775 struct binder_proc *proc;
5776
5777 seq_puts(m, "binder transactions:\n");
5778 mutex_lock(&binder_procs_lock);
5779 hlist_for_each_entry(proc, &binder_procs, proc_node)
5780 print_binder_proc(m, proc, 0);
5781 mutex_unlock(&binder_procs_lock);
5782
5783 return 0;
5784 }
5785
5786 static int binder_proc_show(struct seq_file *m, void *unused)
5787 {
5788 struct binder_proc *itr;
5789 int pid = (unsigned long)m->private;
5790
5791 mutex_lock(&binder_procs_lock);
5792 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5793 if (itr->pid == pid) {
5794 seq_puts(m, "binder proc state:\n");
5795 print_binder_proc(m, itr, 1);
5796 }
5797 }
5798 mutex_unlock(&binder_procs_lock);
5799
5800 return 0;
5801 }
5802
5803 static void print_binder_transaction_log_entry(struct seq_file *m,
5804 struct binder_transaction_log_entry *e)
5805 {
5806 int debug_id = READ_ONCE(e->debug_id_done);
5807 /*
5808 * read barrier to guarantee debug_id_done read before
5809 * we print the log values
5810 */
5811 smp_rmb();
5812 seq_printf(m,
5813 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5814 e->debug_id, (e->call_type == 2) ? "reply" :
5815 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5816 e->from_thread, e->to_proc, e->to_thread, e->context_name,
5817 e->to_node, e->target_handle, e->data_size, e->offsets_size,
5818 e->return_error, e->return_error_param,
5819 e->return_error_line);
5820 /*
5821 	 * read barrier to guarantee the read of debug_id_done happens after
5822 	 * we are done printing the fields of the entry
5823 */
5824 smp_rmb();
5825 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5826 "\n" : " (incomplete)\n");
5827 }
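/*
 * Illustrative example of one log entry as printed above (values made up):
 *
 *   217: call  from 612:634 to 988:0 context binder node 7 handle 3 size 128:16 ret 0/0 l=0
 *
 * " (incomplete)" is appended in place of the bare newline when the entry
 * was still being written while we read it.
 */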
5828
5829 static int binder_transaction_log_show(struct seq_file *m, void *unused)
5830 {
5831 struct binder_transaction_log *log = m->private;
5832 unsigned int log_cur = atomic_read(&log->cur);
5833 unsigned int count;
5834 unsigned int cur;
5835 int i;
5836
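	/*
	 * log->entry[] is a ring buffer indexed by log->cur. If the log has
	 * not wrapped yet, print entries 0..log_cur; otherwise print all
	 * ARRAY_SIZE(log->entry) entries starting at the oldest slot,
	 * (log_cur + 1) % ARRAY_SIZE(log->entry).
	 */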
5837 count = log_cur + 1;
5838 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5839 0 : count % ARRAY_SIZE(log->entry);
5840 if (count > ARRAY_SIZE(log->entry) || log->full)
5841 count = ARRAY_SIZE(log->entry);
5842 for (i = 0; i < count; i++) {
5843 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5844
5845 print_binder_transaction_log_entry(m, &log->entry[index]);
5846 }
5847 return 0;
5848 }
5849
5850 static const struct file_operations binder_fops = {
5851 .owner = THIS_MODULE,
5852 .poll = binder_poll,
5853 .unlocked_ioctl = binder_ioctl,
5854 .compat_ioctl = binder_ioctl,
5855 .mmap = binder_mmap,
5856 .open = binder_open,
5857 .flush = binder_flush,
5858 .release = binder_release,
5859 };
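/*
 * Rough userspace sketch (illustrative only, not part of this driver): a
 * client such as libbinder typically drives these file operations roughly
 * as follows, assuming a /dev/binder node and <uapi/linux/android/binder.h>:
 *
 *   int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *   struct binder_version vers;
 *   ioctl(fd, BINDER_VERSION, &vers);
 *   void *map = mmap(NULL, 1 << 20, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 * after which BINDER_WRITE_READ ioctls carry the BC_ commands in and the
 * BR_ returns out.
 */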
5860
5861 BINDER_DEBUG_ENTRY(state);
5862 BINDER_DEBUG_ENTRY(stats);
5863 BINDER_DEBUG_ENTRY(transactions);
5864 BINDER_DEBUG_ENTRY(transaction_log);
5865
5866 static int __init init_binder_device(const char *name)
5867 {
5868 int ret;
5869 struct binder_device *binder_device;
5870
5871 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5872 if (!binder_device)
5873 return -ENOMEM;
5874
5875 binder_device->miscdev.fops = &binder_fops;
5876 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5877 binder_device->miscdev.name = name;
5878
5879 binder_device->context.binder_context_mgr_uid = INVALID_UID;
5880 binder_device->context.name = name;
5881 mutex_init(&binder_device->context.context_mgr_node_lock);
5882
5883 ret = misc_register(&binder_device->miscdev);
5884 if (ret < 0) {
5885 kfree(binder_device);
5886 return ret;
5887 }
5888
5889 hlist_add_head(&binder_device->hlist, &binder_devices);
5890
5891 return ret;
5892 }
5893
5894 static int __init binder_init(void)
5895 {
5896 int ret;
5897 char *device_name, *device_names;
5898 struct binder_device *device;
5899 struct hlist_node *tmp;
5900
5901 binder_alloc_shrinker_init();
5902
5903 atomic_set(&binder_transaction_log.cur, ~0U);
5904 atomic_set(&binder_transaction_log_failed.cur, ~0U);
5905
5906 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5907 if (binder_debugfs_dir_entry_root)
5908 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5909 binder_debugfs_dir_entry_root);
5910
5911 if (binder_debugfs_dir_entry_root) {
5912 debugfs_create_file("state",
5913 S_IRUGO,
5914 binder_debugfs_dir_entry_root,
5915 NULL,
5916 &binder_state_fops);
5917 debugfs_create_file("stats",
5918 S_IRUGO,
5919 binder_debugfs_dir_entry_root,
5920 NULL,
5921 &binder_stats_fops);
5922 debugfs_create_file("transactions",
5923 S_IRUGO,
5924 binder_debugfs_dir_entry_root,
5925 NULL,
5926 &binder_transactions_fops);
5927 debugfs_create_file("transaction_log",
5928 S_IRUGO,
5929 binder_debugfs_dir_entry_root,
5930 &binder_transaction_log,
5931 &binder_transaction_log_fops);
5932 debugfs_create_file("failed_transaction_log",
5933 S_IRUGO,
5934 binder_debugfs_dir_entry_root,
5935 &binder_transaction_log_failed,
5936 &binder_transaction_log_fops);
5937 }
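	/*
	 * With debugfs mounted in its usual place, the files created above
	 * appear as /sys/kernel/debug/binder/{state,stats,transactions,
	 * transaction_log,failed_transaction_log}, all read-only (S_IRUGO).
	 */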
5938
5939 /*
5940 * Copy the module_parameter string, because we don't want to
5941 * tokenize it in-place.
5942 */
5943 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
5944 if (!device_names) {
5945 ret = -ENOMEM;
5946 goto err_alloc_device_names_failed;
5947 }
5948 strcpy(device_names, binder_devices_param);
5949
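	/*
	 * binder_devices_param is a comma-separated list of device names,
	 * e.g. "binder,hwbinder,vndbinder" on typical Android configurations;
	 * each token below is registered as its own misc character device
	 * (normally surfacing as /dev/<name>).
	 */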
5950 while ((device_name = strsep(&device_names, ","))) {
5951 ret = init_binder_device(device_name);
5952 if (ret)
5953 goto err_init_binder_device_failed;
5954 }
5955
5956 return ret;
5957
5958 err_init_binder_device_failed:
5959 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
5960 misc_deregister(&device->miscdev);
5961 hlist_del(&device->hlist);
5962 kfree(device);
5963 }
5964 err_alloc_device_names_failed:
5965 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
5966
5967 return ret;
5968 }
5969
5970 device_initcall(binder_init);
5971
5972 #define CREATE_TRACE_POINTS
5973 #include "binder_trace.h"
5974
5975 MODULE_LICENSE("GPL v2");