binder: refactor binder ref inc/dec for thread safety
[GitHub/LineageOS/android_kernel_motorola_exynos9610.git] / drivers / android / binder.c
CommitLineData
355b0502
GKH
1/* binder.c
2 *
3 * Android IPC Subsystem
4 *
5 * Copyright (C) 2007-2008 Google, Inc.
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
56b468fc
AS
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
355b0502
GKH
20#include <asm/cacheflush.h>
21#include <linux/fdtable.h>
22#include <linux/file.h>
e2610b26 23#include <linux/freezer.h>
355b0502
GKH
24#include <linux/fs.h>
25#include <linux/list.h>
26#include <linux/miscdevice.h>
355b0502
GKH
27#include <linux/module.h>
28#include <linux/mutex.h>
29#include <linux/nsproxy.h>
30#include <linux/poll.h>
16b66554 31#include <linux/debugfs.h>
355b0502 32#include <linux/rbtree.h>
3f07c014 33#include <linux/sched/signal.h>
6e84f315 34#include <linux/sched/mm.h>
5249f488 35#include <linux/seq_file.h>
355b0502 36#include <linux/uaccess.h>
17cf22c3 37#include <linux/pid_namespace.h>
79af7307 38#include <linux/security.h>
355b0502 39
9246a4a9
GKH
40#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
41#define BINDER_IPC_32BIT 1
42#endif
43
44#include <uapi/linux/android/binder.h>
0c972a05 45#include "binder_alloc.h"
975a1ac9 46#include "binder_trace.h"
355b0502 47
975a1ac9 48static DEFINE_MUTEX(binder_main_lock);
c44b1231
TK
49
50static HLIST_HEAD(binder_deferred_list);
355b0502
GKH
51static DEFINE_MUTEX(binder_deferred_lock);
52
ac4812c5 53static HLIST_HEAD(binder_devices);
355b0502 54static HLIST_HEAD(binder_procs);
c44b1231
TK
55static DEFINE_MUTEX(binder_procs_lock);
56
355b0502 57static HLIST_HEAD(binder_dead_nodes);
c44b1231 58static DEFINE_SPINLOCK(binder_dead_nodes_lock);
355b0502 59
16b66554
AH
60static struct dentry *binder_debugfs_dir_entry_root;
61static struct dentry *binder_debugfs_dir_entry_proc;
656a800a 62static atomic_t binder_last_id;
355b0502 63
5249f488
AH
64#define BINDER_DEBUG_ENTRY(name) \
65static int binder_##name##_open(struct inode *inode, struct file *file) \
66{ \
16b66554 67 return single_open(file, binder_##name##_show, inode->i_private); \
5249f488
AH
68} \
69\
70static const struct file_operations binder_##name##_fops = { \
71 .owner = THIS_MODULE, \
72 .open = binder_##name##_open, \
73 .read = seq_read, \
74 .llseek = seq_lseek, \
75 .release = single_release, \
76}
77
78static int binder_proc_show(struct seq_file *m, void *unused);
79BINDER_DEBUG_ENTRY(proc);
355b0502
GKH
80
81/* This is only defined in include/asm-arm/sizes.h */
82#ifndef SZ_1K
83#define SZ_1K 0x400
84#endif
85
86#ifndef SZ_4M
87#define SZ_4M 0x400000
88#endif
89
90#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
91
92#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
93
94enum {
95 BINDER_DEBUG_USER_ERROR = 1U << 0,
96 BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
97 BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
98 BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
99 BINDER_DEBUG_DEAD_BINDER = 1U << 4,
100 BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
101 BINDER_DEBUG_READ_WRITE = 1U << 6,
102 BINDER_DEBUG_USER_REFS = 1U << 7,
103 BINDER_DEBUG_THREADS = 1U << 8,
104 BINDER_DEBUG_TRANSACTION = 1U << 9,
105 BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
106 BINDER_DEBUG_FREE_BUFFER = 1U << 11,
107 BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
19c98724 108 BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
355b0502
GKH
109};
110static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
111 BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
112module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
113
ac4812c5
MC
114static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
115module_param_named(devices, binder_devices_param, charp, 0444);
116
355b0502
GKH
117static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
118static int binder_stop_on_user_error;
119
120static int binder_set_stop_on_user_error(const char *val,
121 struct kernel_param *kp)
122{
123 int ret;
10f62861 124
355b0502
GKH
125 ret = param_set_int(val, kp);
126 if (binder_stop_on_user_error < 2)
127 wake_up(&binder_user_error_wait);
128 return ret;
129}
130module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
131 param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
132
133#define binder_debug(mask, x...) \
134 do { \
135 if (binder_debug_mask & mask) \
258767fe 136 pr_info(x); \
355b0502
GKH
137 } while (0)
138
139#define binder_user_error(x...) \
140 do { \
141 if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
258767fe 142 pr_info(x); \
355b0502
GKH
143 if (binder_stop_on_user_error) \
144 binder_stop_on_user_error = 2; \
145 } while (0)
146
feba3900
MC
147#define to_flat_binder_object(hdr) \
148 container_of(hdr, struct flat_binder_object, hdr)
149
150#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)
151
7980240b
MC
152#define to_binder_buffer_object(hdr) \
153 container_of(hdr, struct binder_buffer_object, hdr)
154
def95c73
MC
155#define to_binder_fd_array_object(hdr) \
156 container_of(hdr, struct binder_fd_array_object, hdr)
157
355b0502
GKH
158enum binder_stat_types {
159 BINDER_STAT_PROC,
160 BINDER_STAT_THREAD,
161 BINDER_STAT_NODE,
162 BINDER_STAT_REF,
163 BINDER_STAT_DEATH,
164 BINDER_STAT_TRANSACTION,
165 BINDER_STAT_TRANSACTION_COMPLETE,
166 BINDER_STAT_COUNT
167};
168
169struct binder_stats {
0953c797
BJS
170 atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
171 atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
172 atomic_t obj_created[BINDER_STAT_COUNT];
173 atomic_t obj_deleted[BINDER_STAT_COUNT];
355b0502
GKH
174};
175
176static struct binder_stats binder_stats;
177
178static inline void binder_stats_deleted(enum binder_stat_types type)
179{
0953c797 180 atomic_inc(&binder_stats.obj_deleted[type]);
355b0502
GKH
181}
182
183static inline void binder_stats_created(enum binder_stat_types type)
184{
0953c797 185 atomic_inc(&binder_stats.obj_created[type]);
355b0502
GKH
186}
187
188struct binder_transaction_log_entry {
189 int debug_id;
d99c7333 190 int debug_id_done;
355b0502
GKH
191 int call_type;
192 int from_proc;
193 int from_thread;
194 int target_handle;
195 int to_proc;
196 int to_thread;
197 int to_node;
198 int data_size;
199 int offsets_size;
57ada2fb
TK
200 int return_error_line;
201 uint32_t return_error;
202 uint32_t return_error_param;
14db3181 203 const char *context_name;
355b0502
GKH
204};
205struct binder_transaction_log {
d99c7333
TK
206 atomic_t cur;
207 bool full;
355b0502
GKH
208 struct binder_transaction_log_entry entry[32];
209};
210static struct binder_transaction_log binder_transaction_log;
211static struct binder_transaction_log binder_transaction_log_failed;
212
213static struct binder_transaction_log_entry *binder_transaction_log_add(
214 struct binder_transaction_log *log)
215{
216 struct binder_transaction_log_entry *e;
d99c7333 217 unsigned int cur = atomic_inc_return(&log->cur);
10f62861 218
d99c7333 219 if (cur >= ARRAY_SIZE(log->entry))
355b0502 220 log->full = 1;
d99c7333
TK
221 e = &log->entry[cur % ARRAY_SIZE(log->entry)];
222 WRITE_ONCE(e->debug_id_done, 0);
223 /*
224 * write-barrier to synchronize access to e->debug_id_done.
225 * We make sure the initialized 0 value is seen before
226 * memset() other fields are zeroed by memset.
227 */
228 smp_wmb();
229 memset(e, 0, sizeof(*e));
355b0502
GKH
230 return e;
231}
232
342e5c90
MC
233struct binder_context {
234 struct binder_node *binder_context_mgr_node;
c44b1231
TK
235 struct mutex context_mgr_node_lock;
236
342e5c90 237 kuid_t binder_context_mgr_uid;
14db3181 238 const char *name;
342e5c90
MC
239};
240
ac4812c5
MC
241struct binder_device {
242 struct hlist_node hlist;
243 struct miscdevice miscdev;
244 struct binder_context context;
342e5c90
MC
245};
246
355b0502
GKH
247struct binder_work {
248 struct list_head entry;
249 enum {
250 BINDER_WORK_TRANSACTION = 1,
251 BINDER_WORK_TRANSACTION_COMPLETE,
26549d17 252 BINDER_WORK_RETURN_ERROR,
355b0502
GKH
253 BINDER_WORK_NODE,
254 BINDER_WORK_DEAD_BINDER,
255 BINDER_WORK_DEAD_BINDER_AND_CLEAR,
256 BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
257 } type;
258};
259
26549d17
TK
260struct binder_error {
261 struct binder_work work;
262 uint32_t cmd;
263};
264
355b0502
GKH
265struct binder_node {
266 int debug_id;
267 struct binder_work work;
268 union {
269 struct rb_node rb_node;
270 struct hlist_node dead_node;
271 };
272 struct binder_proc *proc;
273 struct hlist_head refs;
274 int internal_strong_refs;
275 int local_weak_refs;
276 int local_strong_refs;
da49889d
AH
277 binder_uintptr_t ptr;
278 binder_uintptr_t cookie;
355b0502
GKH
279 unsigned has_strong_ref:1;
280 unsigned pending_strong_ref:1;
281 unsigned has_weak_ref:1;
282 unsigned pending_weak_ref:1;
283 unsigned has_async_transaction:1;
284 unsigned accept_fds:1;
285 unsigned min_priority:8;
286 struct list_head async_todo;
287};
288
289struct binder_ref_death {
290 struct binder_work work;
da49889d 291 binder_uintptr_t cookie;
355b0502
GKH
292};
293
372e3147
TK
294/**
295 * struct binder_ref_data - binder_ref counts and id
296 * @debug_id: unique ID for the ref
297 * @desc: unique userspace handle for ref
298 * @strong: strong ref count (debugging only if not locked)
299 * @weak: weak ref count (debugging only if not locked)
300 *
301 * Structure to hold ref count and ref id information. Since
302 * the actual ref can only be accessed with a lock, this structure
303 * is used to return information about the ref to callers of
304 * ref inc/dec functions.
305 */
306struct binder_ref_data {
307 int debug_id;
308 uint32_t desc;
309 int strong;
310 int weak;
311};
312
313/**
314 * struct binder_ref - struct to track references on nodes
315 * @data: binder_ref_data containing id, handle, and current refcounts
316 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
317 * @rb_node_node: node for lookup by @node in proc's rb_tree
318 * @node_entry: list entry for node->refs list in target node
319 * @proc: binder_proc containing ref
320 * @node: binder_node of target node. When cleaning up a
321 * ref for deletion in binder_cleanup_ref, a non-NULL
322 * @node indicates the node must be freed
323 * @death: pointer to death notification (ref_death) if requested
324 *
325 * Structure to track references from procA to target node (on procB). This
326 * structure is unsafe to access without holding @proc->outer_lock.
327 */
355b0502
GKH
328struct binder_ref {
329 /* Lookups needed: */
330 /* node + proc => ref (transaction) */
331 /* desc + proc => ref (transaction, inc/dec ref) */
332 /* node => refs + procs (proc exit) */
372e3147 333 struct binder_ref_data data;
355b0502
GKH
334 struct rb_node rb_node_desc;
335 struct rb_node rb_node_node;
336 struct hlist_node node_entry;
337 struct binder_proc *proc;
338 struct binder_node *node;
355b0502
GKH
339 struct binder_ref_death *death;
340};
341
355b0502
GKH
342enum binder_deferred_state {
343 BINDER_DEFERRED_PUT_FILES = 0x01,
344 BINDER_DEFERRED_FLUSH = 0x02,
345 BINDER_DEFERRED_RELEASE = 0x04,
346};
347
348struct binder_proc {
349 struct hlist_node proc_node;
350 struct rb_root threads;
351 struct rb_root nodes;
352 struct rb_root refs_by_desc;
353 struct rb_root refs_by_node;
354 int pid;
355b0502
GKH
355 struct task_struct *tsk;
356 struct files_struct *files;
357 struct hlist_node deferred_work_node;
358 int deferred_work;
7a4408c6 359 bool is_dead;
355b0502 360
355b0502
GKH
361 struct list_head todo;
362 wait_queue_head_t wait;
363 struct binder_stats stats;
364 struct list_head delivered_death;
365 int max_threads;
366 int requested_threads;
367 int requested_threads_started;
368 int ready_threads;
7a4408c6 369 int tmp_ref;
355b0502 370 long default_priority;
16b66554 371 struct dentry *debugfs_entry;
fdfb4a99 372 struct binder_alloc alloc;
342e5c90 373 struct binder_context *context;
355b0502
GKH
374};
375
376enum {
377 BINDER_LOOPER_STATE_REGISTERED = 0x01,
378 BINDER_LOOPER_STATE_ENTERED = 0x02,
379 BINDER_LOOPER_STATE_EXITED = 0x04,
380 BINDER_LOOPER_STATE_INVALID = 0x08,
381 BINDER_LOOPER_STATE_WAITING = 0x10,
355b0502
GKH
382};
383
384struct binder_thread {
385 struct binder_proc *proc;
386 struct rb_node rb_node;
387 int pid;
08dabcee
TK
388 int looper; /* only modified by this thread */
389 bool looper_need_return; /* can be written by other thread */
355b0502
GKH
390 struct binder_transaction *transaction_stack;
391 struct list_head todo;
26549d17
TK
392 struct binder_error return_error;
393 struct binder_error reply_error;
355b0502
GKH
394 wait_queue_head_t wait;
395 struct binder_stats stats;
7a4408c6
TK
396 atomic_t tmp_ref;
397 bool is_dead;
355b0502
GKH
398};
399
400struct binder_transaction {
401 int debug_id;
402 struct binder_work work;
403 struct binder_thread *from;
404 struct binder_transaction *from_parent;
405 struct binder_proc *to_proc;
406 struct binder_thread *to_thread;
407 struct binder_transaction *to_parent;
408 unsigned need_reply:1;
409 /* unsigned is_dead:1; */ /* not used at the moment */
410
411 struct binder_buffer *buffer;
412 unsigned int code;
413 unsigned int flags;
414 long priority;
415 long saved_priority;
4a2ebb93 416 kuid_t sender_euid;
7a4408c6
TK
417 /**
418 * @lock: protects @from, @to_proc, and @to_thread
419 *
420 * @from, @to_proc, and @to_thread can be set to NULL
421 * during thread teardown
422 */
423 spinlock_t lock;
355b0502
GKH
424};
425
426static void
427binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
7a4408c6
TK
428static void binder_free_thread(struct binder_thread *thread);
429static void binder_free_proc(struct binder_proc *proc);
355b0502 430
efde99cd 431static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
355b0502
GKH
432{
433 struct files_struct *files = proc->files;
355b0502
GKH
434 unsigned long rlim_cur;
435 unsigned long irqs;
436
437 if (files == NULL)
438 return -ESRCH;
439
dcfadfa4
AV
440 if (!lock_task_sighand(proc->tsk, &irqs))
441 return -EMFILE;
bf202361 442
dcfadfa4
AV
443 rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
444 unlock_task_sighand(proc->tsk, &irqs);
355b0502 445
dcfadfa4 446 return __alloc_fd(files, 0, rlim_cur, flags);
355b0502
GKH
447}
448
449/*
450 * copied from fd_install
451 */
452static void task_fd_install(
453 struct binder_proc *proc, unsigned int fd, struct file *file)
454{
f869e8a7
AV
455 if (proc->files)
456 __fd_install(proc->files, fd, file);
355b0502
GKH
457}
458
459/*
460 * copied from sys_close
461 */
462static long task_close_fd(struct binder_proc *proc, unsigned int fd)
463{
355b0502
GKH
464 int retval;
465
483ce1d4 466 if (proc->files == NULL)
355b0502
GKH
467 return -ESRCH;
468
483ce1d4 469 retval = __close_fd(proc->files, fd);
355b0502
GKH
470 /* can't restart close syscall because file table entry was cleared */
471 if (unlikely(retval == -ERESTARTSYS ||
472 retval == -ERESTARTNOINTR ||
473 retval == -ERESTARTNOHAND ||
474 retval == -ERESTART_RESTARTBLOCK))
475 retval = -EINTR;
476
477 return retval;
355b0502
GKH
478}
479
975a1ac9
AH
480static inline void binder_lock(const char *tag)
481{
482 trace_binder_lock(tag);
483 mutex_lock(&binder_main_lock);
484 trace_binder_locked(tag);
485}
486
487static inline void binder_unlock(const char *tag)
488{
489 trace_binder_unlock(tag);
490 mutex_unlock(&binder_main_lock);
491}
492
355b0502
GKH
493static void binder_set_nice(long nice)
494{
495 long min_nice;
10f62861 496
355b0502
GKH
497 if (can_nice(current, nice)) {
498 set_user_nice(current, nice);
499 return;
500 }
7aa2c016 501 min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
355b0502 502 binder_debug(BINDER_DEBUG_PRIORITY_CAP,
56b468fc
AS
503 "%d: nice value %ld not allowed use %ld instead\n",
504 current->pid, nice, min_nice);
355b0502 505 set_user_nice(current, min_nice);
8698a745 506 if (min_nice <= MAX_NICE)
355b0502 507 return;
56b468fc 508 binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
355b0502
GKH
509}
510
355b0502 511static struct binder_node *binder_get_node(struct binder_proc *proc,
da49889d 512 binder_uintptr_t ptr)
355b0502
GKH
513{
514 struct rb_node *n = proc->nodes.rb_node;
515 struct binder_node *node;
516
517 while (n) {
518 node = rb_entry(n, struct binder_node, rb_node);
519
520 if (ptr < node->ptr)
521 n = n->rb_left;
522 else if (ptr > node->ptr)
523 n = n->rb_right;
524 else
525 return node;
526 }
527 return NULL;
528}
529
530static struct binder_node *binder_new_node(struct binder_proc *proc,
da49889d
AH
531 binder_uintptr_t ptr,
532 binder_uintptr_t cookie)
355b0502
GKH
533{
534 struct rb_node **p = &proc->nodes.rb_node;
535 struct rb_node *parent = NULL;
536 struct binder_node *node;
537
538 while (*p) {
539 parent = *p;
540 node = rb_entry(parent, struct binder_node, rb_node);
541
542 if (ptr < node->ptr)
543 p = &(*p)->rb_left;
544 else if (ptr > node->ptr)
545 p = &(*p)->rb_right;
546 else
547 return NULL;
548 }
549
550 node = kzalloc(sizeof(*node), GFP_KERNEL);
551 if (node == NULL)
552 return NULL;
553 binder_stats_created(BINDER_STAT_NODE);
554 rb_link_node(&node->rb_node, parent, p);
555 rb_insert_color(&node->rb_node, &proc->nodes);
656a800a 556 node->debug_id = atomic_inc_return(&binder_last_id);
355b0502
GKH
557 node->proc = proc;
558 node->ptr = ptr;
559 node->cookie = cookie;
560 node->work.type = BINDER_WORK_NODE;
561 INIT_LIST_HEAD(&node->work.entry);
562 INIT_LIST_HEAD(&node->async_todo);
563 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
da49889d 564 "%d:%d node %d u%016llx c%016llx created\n",
355b0502 565 proc->pid, current->pid, node->debug_id,
da49889d 566 (u64)node->ptr, (u64)node->cookie);
355b0502
GKH
567 return node;
568}
569
570static int binder_inc_node(struct binder_node *node, int strong, int internal,
571 struct list_head *target_list)
572{
573 if (strong) {
574 if (internal) {
575 if (target_list == NULL &&
576 node->internal_strong_refs == 0 &&
342e5c90
MC
577 !(node->proc &&
578 node == node->proc->context->binder_context_mgr_node &&
579 node->has_strong_ref)) {
56b468fc
AS
580 pr_err("invalid inc strong node for %d\n",
581 node->debug_id);
355b0502
GKH
582 return -EINVAL;
583 }
584 node->internal_strong_refs++;
585 } else
586 node->local_strong_refs++;
587 if (!node->has_strong_ref && target_list) {
588 list_del_init(&node->work.entry);
589 list_add_tail(&node->work.entry, target_list);
590 }
591 } else {
592 if (!internal)
593 node->local_weak_refs++;
594 if (!node->has_weak_ref && list_empty(&node->work.entry)) {
595 if (target_list == NULL) {
56b468fc
AS
596 pr_err("invalid inc weak node for %d\n",
597 node->debug_id);
355b0502
GKH
598 return -EINVAL;
599 }
600 list_add_tail(&node->work.entry, target_list);
601 }
602 }
603 return 0;
604}
605
606static int binder_dec_node(struct binder_node *node, int strong, int internal)
607{
608 if (strong) {
609 if (internal)
610 node->internal_strong_refs--;
611 else
612 node->local_strong_refs--;
613 if (node->local_strong_refs || node->internal_strong_refs)
614 return 0;
615 } else {
616 if (!internal)
617 node->local_weak_refs--;
618 if (node->local_weak_refs || !hlist_empty(&node->refs))
619 return 0;
620 }
621 if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
622 if (list_empty(&node->work.entry)) {
623 list_add_tail(&node->work.entry, &node->proc->todo);
624 wake_up_interruptible(&node->proc->wait);
625 }
626 } else {
627 if (hlist_empty(&node->refs) && !node->local_strong_refs &&
628 !node->local_weak_refs) {
629 list_del_init(&node->work.entry);
630 if (node->proc) {
631 rb_erase(&node->rb_node, &node->proc->nodes);
632 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
56b468fc 633 "refless node %d deleted\n",
355b0502
GKH
634 node->debug_id);
635 } else {
c44b1231 636 spin_lock(&binder_dead_nodes_lock);
355b0502 637 hlist_del(&node->dead_node);
c44b1231 638 spin_unlock(&binder_dead_nodes_lock);
355b0502 639 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
56b468fc 640 "dead node %d deleted\n",
355b0502
GKH
641 node->debug_id);
642 }
643 kfree(node);
644 binder_stats_deleted(BINDER_STAT_NODE);
645 }
646 }
647
648 return 0;
649}
650
651
652static struct binder_ref *binder_get_ref(struct binder_proc *proc,
0a3ffab9 653 u32 desc, bool need_strong_ref)
355b0502
GKH
654{
655 struct rb_node *n = proc->refs_by_desc.rb_node;
656 struct binder_ref *ref;
657
658 while (n) {
659 ref = rb_entry(n, struct binder_ref, rb_node_desc);
660
372e3147 661 if (desc < ref->data.desc) {
355b0502 662 n = n->rb_left;
372e3147 663 } else if (desc > ref->data.desc) {
355b0502 664 n = n->rb_right;
372e3147 665 } else if (need_strong_ref && !ref->data.strong) {
0a3ffab9
AH
666 binder_user_error("tried to use weak ref as strong ref\n");
667 return NULL;
668 } else {
355b0502 669 return ref;
0a3ffab9 670 }
355b0502
GKH
671 }
672 return NULL;
673}
674
372e3147
TK
675/**
676 * binder_get_ref_for_node() - get the ref associated with given node
677 * @proc: binder_proc that owns the ref
678 * @node: binder_node of target
679 * @new_ref: newly allocated binder_ref to be initialized or %NULL
680 *
681 * Look up the ref for the given node and return it if it exists
682 *
683 * If it doesn't exist and the caller provides a newly allocated
684 * ref, initialize the fields of the newly allocated ref and insert
685 * into the given proc rb_trees and node refs list.
686 *
687 * Return: the ref for node. It is possible that another thread
688 * allocated/initialized the ref first in which case the
689 * returned ref would be different than the passed-in
690 * new_ref. new_ref must be kfree'd by the caller in
691 * this case.
692 */
355b0502 693static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
372e3147
TK
694 struct binder_node *node,
695 struct binder_ref *new_ref)
355b0502 696{
372e3147 697 struct binder_context *context = proc->context;
355b0502
GKH
698 struct rb_node **p = &proc->refs_by_node.rb_node;
699 struct rb_node *parent = NULL;
372e3147
TK
700 struct binder_ref *ref;
701 struct rb_node *n;
355b0502
GKH
702
703 while (*p) {
704 parent = *p;
705 ref = rb_entry(parent, struct binder_ref, rb_node_node);
706
707 if (node < ref->node)
708 p = &(*p)->rb_left;
709 else if (node > ref->node)
710 p = &(*p)->rb_right;
711 else
712 return ref;
713 }
372e3147 714 if (!new_ref)
355b0502 715 return NULL;
372e3147 716
355b0502 717 binder_stats_created(BINDER_STAT_REF);
372e3147 718 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
355b0502
GKH
719 new_ref->proc = proc;
720 new_ref->node = node;
721 rb_link_node(&new_ref->rb_node_node, parent, p);
722 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
723
372e3147 724 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
355b0502
GKH
725 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
726 ref = rb_entry(n, struct binder_ref, rb_node_desc);
372e3147 727 if (ref->data.desc > new_ref->data.desc)
355b0502 728 break;
372e3147 729 new_ref->data.desc = ref->data.desc + 1;
355b0502
GKH
730 }
731
732 p = &proc->refs_by_desc.rb_node;
733 while (*p) {
734 parent = *p;
735 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
736
372e3147 737 if (new_ref->data.desc < ref->data.desc)
355b0502 738 p = &(*p)->rb_left;
372e3147 739 else if (new_ref->data.desc > ref->data.desc)
355b0502
GKH
740 p = &(*p)->rb_right;
741 else
742 BUG();
743 }
744 rb_link_node(&new_ref->rb_node_desc, parent, p);
745 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
e4cffcf4 746 hlist_add_head(&new_ref->node_entry, &node->refs);
355b0502 747
e4cffcf4
TK
748 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
749 "%d new ref %d desc %d for node %d\n",
372e3147 750 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
e4cffcf4 751 node->debug_id);
355b0502
GKH
752 return new_ref;
753}
754
372e3147 755static void binder_cleanup_ref(struct binder_ref *ref)
355b0502
GKH
756{
757 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
56b468fc 758 "%d delete ref %d desc %d for node %d\n",
372e3147 759 ref->proc->pid, ref->data.debug_id, ref->data.desc,
56b468fc 760 ref->node->debug_id);
355b0502
GKH
761
762 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
763 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
372e3147
TK
764
765 if (ref->data.strong)
355b0502 766 binder_dec_node(ref->node, 1, 1);
372e3147 767
355b0502
GKH
768 hlist_del(&ref->node_entry);
769 binder_dec_node(ref->node, 0, 1);
372e3147 770
355b0502
GKH
771 if (ref->death) {
772 binder_debug(BINDER_DEBUG_DEAD_BINDER,
56b468fc 773 "%d delete ref %d desc %d has death notification\n",
372e3147
TK
774 ref->proc->pid, ref->data.debug_id,
775 ref->data.desc);
355b0502 776 list_del(&ref->death->work.entry);
355b0502
GKH
777 binder_stats_deleted(BINDER_STAT_DEATH);
778 }
355b0502
GKH
779 binder_stats_deleted(BINDER_STAT_REF);
780}
781
372e3147
TK
782/**
783 * binder_inc_ref() - increment the ref for given handle
784 * @ref: ref to be incremented
785 * @strong: if true, strong increment, else weak
786 * @target_list: list to queue node work on
787 *
788 * Increment the ref.
789 *
790 * Return: 0, if successful, else errno
791 */
355b0502
GKH
792static int binder_inc_ref(struct binder_ref *ref, int strong,
793 struct list_head *target_list)
794{
795 int ret;
10f62861 796
355b0502 797 if (strong) {
372e3147 798 if (ref->data.strong == 0) {
355b0502
GKH
799 ret = binder_inc_node(ref->node, 1, 1, target_list);
800 if (ret)
801 return ret;
802 }
372e3147 803 ref->data.strong++;
355b0502 804 } else {
372e3147 805 if (ref->data.weak == 0) {
355b0502
GKH
806 ret = binder_inc_node(ref->node, 0, 1, target_list);
807 if (ret)
808 return ret;
809 }
372e3147 810 ref->data.weak++;
355b0502
GKH
811 }
812 return 0;
813}
814
372e3147
TK
815/**
816 * binder_dec_ref() - dec the ref for given handle
817 * @ref: ref to be decremented
818 * @strong: if true, strong decrement, else weak
819 *
820 * Decrement the ref.
821 *
822 * TODO: kfree is avoided here since an upcoming patch
823 * will put this under a lock.
824 *
825 * Return: true if ref is cleaned up and ready to be freed
826 */
827static bool binder_dec_ref(struct binder_ref *ref, int strong)
355b0502
GKH
828{
829 if (strong) {
372e3147 830 if (ref->data.strong == 0) {
56b468fc 831 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
372e3147
TK
832 ref->proc->pid, ref->data.debug_id,
833 ref->data.desc, ref->data.strong,
834 ref->data.weak);
835 return false;
355b0502 836 }
372e3147
TK
837 ref->data.strong--;
838 if (ref->data.strong == 0) {
355b0502 839 int ret;
10f62861 840
355b0502
GKH
841 ret = binder_dec_node(ref->node, strong, 1);
842 if (ret)
372e3147 843 return false;
355b0502
GKH
844 }
845 } else {
372e3147 846 if (ref->data.weak == 0) {
56b468fc 847 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
372e3147
TK
848 ref->proc->pid, ref->data.debug_id,
849 ref->data.desc, ref->data.strong,
850 ref->data.weak);
851 return false;
355b0502 852 }
372e3147 853 ref->data.weak--;
355b0502 854 }
372e3147
TK
855 if (ref->data.strong == 0 && ref->data.weak == 0) {
856 binder_cleanup_ref(ref);
857 /*
858 * TODO: we could kfree(ref) here, but an upcoming
859 * patch will call this with a lock held, so we
860 * return an indication that the ref should be
861 * freed.
862 */
863 return true;
864 }
865 return false;
866}
867
868/**
869 * binder_get_node_from_ref() - get the node from the given proc/desc
870 * @proc: proc containing the ref
871 * @desc: the handle associated with the ref
872 * @need_strong_ref: if true, only return node if ref is strong
873 * @rdata: the id/refcount data for the ref
874 *
875 * Given a proc and ref handle, return the associated binder_node
876 *
877 * Return: a binder_node or NULL if not found or not strong when strong required
878 */
879static struct binder_node *binder_get_node_from_ref(
880 struct binder_proc *proc,
881 u32 desc, bool need_strong_ref,
882 struct binder_ref_data *rdata)
883{
884 struct binder_node *node;
885 struct binder_ref *ref;
886
887 ref = binder_get_ref(proc, desc, need_strong_ref);
888 if (!ref)
889 goto err_no_ref;
890 node = ref->node;
891 if (rdata)
892 *rdata = ref->data;
893
894 return node;
895
896err_no_ref:
897 return NULL;
898}
899
900/**
901 * binder_free_ref() - free the binder_ref
902 * @ref: ref to free
903 *
904 * Free the binder_ref and the binder_ref_death indicated by ref->death.
905 */
906static void binder_free_ref(struct binder_ref *ref)
907{
908 kfree(ref->death);
909 kfree(ref);
910}
911
912/**
913 * binder_update_ref_for_handle() - inc/dec the ref for given handle
914 * @proc: proc containing the ref
915 * @desc: the handle associated with the ref
916 * @increment: true=inc reference, false=dec reference
917 * @strong: true=strong reference, false=weak reference
918 * @rdata: the id/refcount data for the ref
919 *
920 * Given a proc and ref handle, increment or decrement the ref
921 * according to "increment" arg.
922 *
923 * Return: 0 if successful, else errno
924 */
925static int binder_update_ref_for_handle(struct binder_proc *proc,
926 uint32_t desc, bool increment, bool strong,
927 struct binder_ref_data *rdata)
928{
929 int ret = 0;
930 struct binder_ref *ref;
931 bool delete_ref = false;
932
933 ref = binder_get_ref(proc, desc, strong);
934 if (!ref) {
935 ret = -EINVAL;
936 goto err_no_ref;
937 }
938 if (increment)
939 ret = binder_inc_ref(ref, strong, NULL);
940 else
941 delete_ref = binder_dec_ref(ref, strong);
942
943 if (rdata)
944 *rdata = ref->data;
945
946 if (delete_ref)
947 binder_free_ref(ref);
948 return ret;
949
950err_no_ref:
951 return ret;
952}
953
954/**
955 * binder_dec_ref_for_handle() - dec the ref for given handle
956 * @proc: proc containing the ref
957 * @desc: the handle associated with the ref
958 * @strong: true=strong reference, false=weak reference
959 * @rdata: the id/refcount data for the ref
960 *
961 * Just calls binder_update_ref_for_handle() to decrement the ref.
962 *
963 * Return: 0 if successful, else errno
964 */
965static int binder_dec_ref_for_handle(struct binder_proc *proc,
966 uint32_t desc, bool strong, struct binder_ref_data *rdata)
967{
968 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
969}
970
971
972/**
973 * binder_inc_ref_for_node() - increment the ref for given proc/node
974 * @proc: proc containing the ref
975 * @node: target node
976 * @strong: true=strong reference, false=weak reference
977 * @target_list: worklist to use if node is incremented
978 * @rdata: the id/refcount data for the ref
979 *
980 * Given a proc and node, increment the ref. Create the ref if it
981 * doesn't already exist
982 *
983 * Return: 0 if successful, else errno
984 */
985static int binder_inc_ref_for_node(struct binder_proc *proc,
986 struct binder_node *node,
987 bool strong,
988 struct list_head *target_list,
989 struct binder_ref_data *rdata)
990{
991 struct binder_ref *ref;
992 struct binder_ref *new_ref = NULL;
993 int ret = 0;
994
995 ref = binder_get_ref_for_node(proc, node, NULL);
996 if (!ref) {
997 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
998 if (!new_ref)
999 return -ENOMEM;
1000 ref = binder_get_ref_for_node(proc, node, new_ref);
1001 }
1002 ret = binder_inc_ref(ref, strong, target_list);
1003 *rdata = ref->data;
1004 if (new_ref && ref != new_ref)
1005 /*
1006 * Another thread created the ref first so
1007 * free the one we allocated
1008 */
1009 kfree(new_ref);
1010 return ret;
355b0502
GKH
1011}
1012
1013static void binder_pop_transaction(struct binder_thread *target_thread,
1014 struct binder_transaction *t)
1015{
b6d282ce
TK
1016 BUG_ON(!target_thread);
1017 BUG_ON(target_thread->transaction_stack != t);
1018 BUG_ON(target_thread->transaction_stack->from != target_thread);
1019 target_thread->transaction_stack =
1020 target_thread->transaction_stack->from_parent;
1021 t->from = NULL;
1022}
1023
7a4408c6
TK
1024/**
1025 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
1026 * @thread: thread to decrement
1027 *
1028 * A thread needs to be kept alive while being used to create or
1029 * handle a transaction. binder_get_txn_from() is used to safely
1030 * extract t->from from a binder_transaction and keep the thread
1031 * indicated by t->from from being freed. When done with that
1032 * binder_thread, this function is called to decrement the
1033 * tmp_ref and free if appropriate (thread has been released
1034 * and no transaction being processed by the driver)
1035 */
1036static void binder_thread_dec_tmpref(struct binder_thread *thread)
1037{
1038 /*
1039 * atomic is used to protect the counter value while
1040 * it cannot reach zero or thread->is_dead is false
1041 *
1042 * TODO: future patch adds locking to ensure that the
1043 * check of tmp_ref and is_dead is done with a lock held
1044 */
1045 atomic_dec(&thread->tmp_ref);
1046 if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
1047 binder_free_thread(thread);
1048 return;
1049 }
1050}
1051
1052/**
1053 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
1054 * @proc: proc to decrement
1055 *
1056 * A binder_proc needs to be kept alive while being used to create or
1057 * handle a transaction. proc->tmp_ref is incremented when
1058 * creating a new transaction or the binder_proc is currently in-use
1059 * by threads that are being released. When done with the binder_proc,
1060 * this function is called to decrement the counter and free the
1061 * proc if appropriate (proc has been released, all threads have
1062 * been released and not currenly in-use to process a transaction).
1063 */
1064static void binder_proc_dec_tmpref(struct binder_proc *proc)
1065{
1066 proc->tmp_ref--;
1067 if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1068 !proc->tmp_ref) {
1069 binder_free_proc(proc);
1070 return;
1071 }
1072}
1073
1074/**
1075 * binder_get_txn_from() - safely extract the "from" thread in transaction
1076 * @t: binder transaction for t->from
1077 *
1078 * Atomically return the "from" thread and increment the tmp_ref
1079 * count for the thread to ensure it stays alive until
1080 * binder_thread_dec_tmpref() is called.
1081 *
1082 * Return: the value of t->from
1083 */
1084static struct binder_thread *binder_get_txn_from(
1085 struct binder_transaction *t)
1086{
1087 struct binder_thread *from;
1088
1089 spin_lock(&t->lock);
1090 from = t->from;
1091 if (from)
1092 atomic_inc(&from->tmp_ref);
1093 spin_unlock(&t->lock);
1094 return from;
1095}
1096
b6d282ce
TK
1097static void binder_free_transaction(struct binder_transaction *t)
1098{
355b0502
GKH
1099 if (t->buffer)
1100 t->buffer->transaction = NULL;
1101 kfree(t);
1102 binder_stats_deleted(BINDER_STAT_TRANSACTION);
1103}
1104
1105static void binder_send_failed_reply(struct binder_transaction *t,
1106 uint32_t error_code)
1107{
1108 struct binder_thread *target_thread;
d4ec15e1 1109 struct binder_transaction *next;
10f62861 1110
355b0502
GKH
1111 BUG_ON(t->flags & TF_ONE_WAY);
1112 while (1) {
7a4408c6 1113 target_thread = binder_get_txn_from(t);
355b0502 1114 if (target_thread) {
26549d17
TK
1115 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1116 "send failed reply for transaction %d to %d:%d\n",
1117 t->debug_id,
1118 target_thread->proc->pid,
1119 target_thread->pid);
1120
1121 binder_pop_transaction(target_thread, t);
1122 if (target_thread->reply_error.cmd == BR_OK) {
1123 target_thread->reply_error.cmd = error_code;
1124 list_add_tail(
1125 &target_thread->reply_error.work.entry,
1126 &target_thread->todo);
355b0502
GKH
1127 wake_up_interruptible(&target_thread->wait);
1128 } else {
26549d17
TK
1129 WARN(1, "Unexpected reply error: %u\n",
1130 target_thread->reply_error.cmd);
355b0502 1131 }
7a4408c6 1132 binder_thread_dec_tmpref(target_thread);
26549d17 1133 binder_free_transaction(t);
355b0502 1134 return;
d4ec15e1
LT
1135 }
1136 next = t->from_parent;
1137
1138 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1139 "send failed reply for transaction %d, target dead\n",
1140 t->debug_id);
1141
b6d282ce 1142 binder_free_transaction(t);
d4ec15e1 1143 if (next == NULL) {
355b0502 1144 binder_debug(BINDER_DEBUG_DEAD_BINDER,
d4ec15e1
LT
1145 "reply failed, no target thread at root\n");
1146 return;
355b0502 1147 }
d4ec15e1
LT
1148 t = next;
1149 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1150 "reply failed, no target thread -- retry %d\n",
1151 t->debug_id);
355b0502
GKH
1152 }
1153}
1154
feba3900
MC
1155/**
1156 * binder_validate_object() - checks for a valid metadata object in a buffer.
1157 * @buffer: binder_buffer that we're parsing.
1158 * @offset: offset in the buffer at which to validate an object.
1159 *
1160 * Return: If there's a valid metadata object at @offset in @buffer, the
1161 * size of that object. Otherwise, it returns zero.
1162 */
1163static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
1164{
1165 /* Check if we can read a header first */
1166 struct binder_object_header *hdr;
1167 size_t object_size = 0;
1168
1169 if (offset > buffer->data_size - sizeof(*hdr) ||
1170 buffer->data_size < sizeof(*hdr) ||
1171 !IS_ALIGNED(offset, sizeof(u32)))
1172 return 0;
1173
1174 /* Ok, now see if we can read a complete object. */
1175 hdr = (struct binder_object_header *)(buffer->data + offset);
1176 switch (hdr->type) {
1177 case BINDER_TYPE_BINDER:
1178 case BINDER_TYPE_WEAK_BINDER:
1179 case BINDER_TYPE_HANDLE:
1180 case BINDER_TYPE_WEAK_HANDLE:
1181 object_size = sizeof(struct flat_binder_object);
1182 break;
1183 case BINDER_TYPE_FD:
1184 object_size = sizeof(struct binder_fd_object);
1185 break;
7980240b
MC
1186 case BINDER_TYPE_PTR:
1187 object_size = sizeof(struct binder_buffer_object);
1188 break;
def95c73
MC
1189 case BINDER_TYPE_FDA:
1190 object_size = sizeof(struct binder_fd_array_object);
1191 break;
feba3900
MC
1192 default:
1193 return 0;
1194 }
1195 if (offset <= buffer->data_size - object_size &&
1196 buffer->data_size >= object_size)
1197 return object_size;
1198 else
1199 return 0;
1200}
1201
7980240b
MC
1202/**
1203 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1204 * @b: binder_buffer containing the object
1205 * @index: index in offset array at which the binder_buffer_object is
1206 * located
1207 * @start: points to the start of the offset array
1208 * @num_valid: the number of valid offsets in the offset array
1209 *
1210 * Return: If @index is within the valid range of the offset array
1211 * described by @start and @num_valid, and if there's a valid
1212 * binder_buffer_object at the offset found in index @index
1213 * of the offset array, that object is returned. Otherwise,
1214 * %NULL is returned.
1215 * Note that the offset found in index @index itself is not
1216 * verified; this function assumes that @num_valid elements
1217 * from @start were previously verified to have valid offsets.
1218 */
1219static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
1220 binder_size_t index,
1221 binder_size_t *start,
1222 binder_size_t num_valid)
1223{
1224 struct binder_buffer_object *buffer_obj;
1225 binder_size_t *offp;
1226
1227 if (index >= num_valid)
1228 return NULL;
1229
1230 offp = start + index;
1231 buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
1232 if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
1233 return NULL;
1234
1235 return buffer_obj;
1236}
1237
1238/**
1239 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1240 * @b: transaction buffer
1241 * @objects_start start of objects buffer
1242 * @buffer: binder_buffer_object in which to fix up
1243 * @offset: start offset in @buffer to fix up
1244 * @last_obj: last binder_buffer_object that we fixed up in
1245 * @last_min_offset: minimum fixup offset in @last_obj
1246 *
1247 * Return: %true if a fixup in buffer @buffer at offset @offset is
1248 * allowed.
1249 *
1250 * For safety reasons, we only allow fixups inside a buffer to happen
1251 * at increasing offsets; additionally, we only allow fixup on the last
1252 * buffer object that was verified, or one of its parents.
1253 *
1254 * Example of what is allowed:
1255 *
1256 * A
1257 * B (parent = A, offset = 0)
1258 * C (parent = A, offset = 16)
1259 * D (parent = C, offset = 0)
1260 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1261 *
1262 * Examples of what is not allowed:
1263 *
1264 * Decreasing offsets within the same parent:
1265 * A
1266 * C (parent = A, offset = 16)
1267 * B (parent = A, offset = 0) // decreasing offset within A
1268 *
1269 * Referring to a parent that wasn't the last object or any of its parents:
1270 * A
1271 * B (parent = A, offset = 0)
1272 * C (parent = A, offset = 0)
1273 * C (parent = A, offset = 16)
1274 * D (parent = B, offset = 0) // B is not A or any of A's parents
1275 */
1276static bool binder_validate_fixup(struct binder_buffer *b,
1277 binder_size_t *objects_start,
1278 struct binder_buffer_object *buffer,
1279 binder_size_t fixup_offset,
1280 struct binder_buffer_object *last_obj,
1281 binder_size_t last_min_offset)
1282{
1283 if (!last_obj) {
1284 /* Nothing to fix up in */
1285 return false;
1286 }
1287
1288 while (last_obj != buffer) {
1289 /*
1290 * Safe to retrieve the parent of last_obj, since it
1291 * was already previously verified by the driver.
1292 */
1293 if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1294 return false;
1295 last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
1296 last_obj = (struct binder_buffer_object *)
1297 (b->data + *(objects_start + last_obj->parent));
1298 }
1299 return (fixup_offset >= last_min_offset);
1300}
1301
355b0502
GKH
/**
 * binder_transaction_buffer_release() - release refs held by a buffer
 * @proc:	process that owns @buffer
 * @buffer:	transaction buffer whose embedded objects are released
 * @failed_at:	if non-NULL, the offset-array entry at which transaction
 *		processing failed; only objects before it are released and
 *		fds are closed. If NULL, every object is released and fds
 *		are left open.
 *
 * Walks the offset array of @buffer and undoes the reference/fd work
 * done while the transaction was set up: node and ref counts are
 * decremented, and (on the failure path) installed fds are closed.
 */
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_start, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	/* Drop the strong node ref taken when the transaction was targeted */
	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	/* The offset array follows the (pointer-aligned) data area */
	off_start = (binder_size_t *)(buffer->data +
				      ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)off_start + buffer->offsets_size;
	for (offp = off_start; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(buffer, *offp);

		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		hdr = (struct binder_object_header *)(buffer->data + *offp);
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			/* strong dec for BINDER, weak dec for WEAK_BINDER */
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				 debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);

			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->fd);
			/*
			 * Only close the fd on the failure path; on normal
			 * release the receiver keeps the installed fd.
			 */
			if (failed_at)
				task_close_fd(proc, fp->fd);
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			uintptr_t parent_buffer;
			u32 *fd_array;
			size_t fd_index;
			binder_size_t fd_buf_size;

			fda = to_binder_fd_array_object(hdr);
			/* Only offsets already processed count as valid */
			parent = binder_validate_ptr(buffer, fda->parent,
						     off_start,
						     offp - off_start);
			if (!parent) {
				pr_err("transaction release %d bad parent offset",
				       debug_id);
				continue;
			}
			/*
			 * Since the parent was already fixed up, convert it
			 * back to kernel address space to access it
			 */
			parent_buffer = parent->buffer -
				binder_alloc_get_user_buffer_offset(
						&proc->alloc);

			fd_buf_size = sizeof(u32) * fda->num_fds;
			/* Reject num_fds values that would overflow the size */
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			fd_array = (u32 *)(parent_buffer + fda->parent_offset);
			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
				task_close_fd(proc, fd_array[fd_index]);
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}
1435
a056af42
MC
1436static int binder_translate_binder(struct flat_binder_object *fp,
1437 struct binder_transaction *t,
1438 struct binder_thread *thread)
1439{
1440 struct binder_node *node;
a056af42
MC
1441 struct binder_proc *proc = thread->proc;
1442 struct binder_proc *target_proc = t->to_proc;
372e3147
TK
1443 struct binder_ref_data rdata;
1444 int ret;
a056af42
MC
1445
1446 node = binder_get_node(proc, fp->binder);
1447 if (!node) {
1448 node = binder_new_node(proc, fp->binder, fp->cookie);
1449 if (!node)
1450 return -ENOMEM;
1451
1452 node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1453 node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1454 }
1455 if (fp->cookie != node->cookie) {
1456 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
1457 proc->pid, thread->pid, (u64)fp->binder,
1458 node->debug_id, (u64)fp->cookie,
1459 (u64)node->cookie);
1460 return -EINVAL;
1461 }
1462 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
1463 return -EPERM;
1464
372e3147
TK
1465 ret = binder_inc_ref_for_node(target_proc, node,
1466 fp->hdr.type == BINDER_TYPE_BINDER,
1467 &thread->todo, &rdata);
1468 if (ret)
1469 return ret;
a056af42
MC
1470
1471 if (fp->hdr.type == BINDER_TYPE_BINDER)
1472 fp->hdr.type = BINDER_TYPE_HANDLE;
1473 else
1474 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
1475 fp->binder = 0;
372e3147 1476 fp->handle = rdata.desc;
a056af42 1477 fp->cookie = 0;
a056af42 1478
372e3147 1479 trace_binder_transaction_node_to_ref(t, node, &rdata);
a056af42
MC
1480 binder_debug(BINDER_DEBUG_TRANSACTION,
1481 " node %d u%016llx -> ref %d desc %d\n",
1482 node->debug_id, (u64)node->ptr,
372e3147 1483 rdata.debug_id, rdata.desc);
a056af42
MC
1484 return 0;
1485}
1486
/**
 * binder_translate_handle() - convert a handle for the target process
 * @fp:		flat_binder_object being sent (rewritten in place)
 * @t:		transaction the object travels in
 * @thread:	sending thread
 *
 * Looks up the node behind the sender's handle. If the node lives in
 * the target process, the object is rewritten back into a (WEAK_)BINDER
 * pointing at the local node; otherwise a ref on the target process is
 * created/incremented and the object becomes a handle in the target's
 * ref space.
 *
 * Return: 0 on success, negative errno on failure
 */
static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
		return -EPERM;

	if (node->proc == target_proc) {
		/* Handle refers back to the receiver: convert to a binder */
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		/* Strong inc only for the (now rewritten) strong case */
		binder_inc_node(node,
				fp->hdr.type == BINDER_TYPE_BINDER,
				0, NULL);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
	} else {
		int ret;
		struct binder_ref_data dest_rdata;

		/* Third-party node: mint a ref in the target's desc space */
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			return ret;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
	return 0;
}
1544
1545static int binder_translate_fd(int fd,
1546 struct binder_transaction *t,
1547 struct binder_thread *thread,
1548 struct binder_transaction *in_reply_to)
1549{
1550 struct binder_proc *proc = thread->proc;
1551 struct binder_proc *target_proc = t->to_proc;
1552 int target_fd;
1553 struct file *file;
1554 int ret;
1555 bool target_allows_fd;
1556
1557 if (in_reply_to)
1558 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
1559 else
1560 target_allows_fd = t->buffer->target_node->accept_fds;
1561 if (!target_allows_fd) {
1562 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
1563 proc->pid, thread->pid,
1564 in_reply_to ? "reply" : "transaction",
1565 fd);
1566 ret = -EPERM;
1567 goto err_fd_not_accepted;
1568 }
1569
1570 file = fget(fd);
1571 if (!file) {
1572 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
1573 proc->pid, thread->pid, fd);
1574 ret = -EBADF;
1575 goto err_fget;
1576 }
1577 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
1578 if (ret < 0) {
1579 ret = -EPERM;
1580 goto err_security;
1581 }
1582
1583 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
1584 if (target_fd < 0) {
1585 ret = -ENOMEM;
1586 goto err_get_unused_fd;
1587 }
1588 task_fd_install(target_proc, target_fd, file);
1589 trace_binder_transaction_fd(t, fd, target_fd);
1590 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
1591 fd, target_fd);
1592
1593 return target_fd;
1594
1595err_get_unused_fd:
1596err_security:
1597 fput(file);
1598err_fget:
1599err_fd_not_accepted:
1600 return ret;
1601}
1602
def95c73
MC
/**
 * binder_translate_fd_array() - translate an array of fds in place
 * @fda:	fd-array object describing count and offset of the fds
 * @parent:	already-fixed-up parent buffer object holding the fd array
 * @t:		transaction the fds travel in
 * @thread:	sending thread
 * @in_reply_to: original transaction when @t is a reply, else NULL
 *
 * Duplicates each fd in the array into the target process via
 * binder_translate_fd(), rewriting the array entries to the new fds.
 * On failure, any fds already installed in the target are closed again.
 *
 * Return: 0 on success, negative errno on failure
 */
static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size, num_installed_fds;
	int target_fd;
	uintptr_t parent_buffer;
	u32 *fd_array;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	/* Reject num_fds values that would overflow fd_buf_size */
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * Since the parent was already fixed up, convert it
	 * back to the kernel address space to access it
	 */
	parent_buffer = parent->buffer -
		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
	fd_array = (u32 *)(parent_buffer + fda->parent_offset);
	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
						in_reply_to);
		if (target_fd < 0)
			goto err_translate_fd_failed;
		/* Entry now holds the fd valid in the target process */
		fd_array[fdi] = target_fd;
	}
	return 0;

err_translate_fd_failed:
	/*
	 * Failed to allocate fd or security error, free fds
	 * installed so far.
	 */
	num_installed_fds = fdi;
	for (fdi = 0; fdi < num_installed_fds; fdi++)
		task_close_fd(target_proc, fd_array[fdi]);
	return target_fd;
}
1660
7980240b
MC
1661static int binder_fixup_parent(struct binder_transaction *t,
1662 struct binder_thread *thread,
1663 struct binder_buffer_object *bp,
1664 binder_size_t *off_start,
1665 binder_size_t num_valid,
1666 struct binder_buffer_object *last_fixup_obj,
1667 binder_size_t last_fixup_min_off)
1668{
1669 struct binder_buffer_object *parent;
1670 u8 *parent_buffer;
1671 struct binder_buffer *b = t->buffer;
1672 struct binder_proc *proc = thread->proc;
1673 struct binder_proc *target_proc = t->to_proc;
1674
1675 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
1676 return 0;
1677
1678 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
1679 if (!parent) {
1680 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
1681 proc->pid, thread->pid);
1682 return -EINVAL;
1683 }
1684
1685 if (!binder_validate_fixup(b, off_start,
1686 parent, bp->parent_offset,
1687 last_fixup_obj,
1688 last_fixup_min_off)) {
1689 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
1690 proc->pid, thread->pid);
1691 return -EINVAL;
1692 }
1693
1694 if (parent->length < sizeof(binder_uintptr_t) ||
1695 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
1696 /* No space for a pointer here! */
1697 binder_user_error("%d:%d got transaction with invalid parent offset\n",
1698 proc->pid, thread->pid);
1699 return -EINVAL;
1700 }
1701 parent_buffer = (u8 *)(parent->buffer -
19c98724
TK
1702 binder_alloc_get_user_buffer_offset(
1703 &target_proc->alloc));
7980240b
MC
1704 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
1705
1706 return 0;
1707}
1708
355b0502
GKH
1709static void binder_transaction(struct binder_proc *proc,
1710 struct binder_thread *thread,
4bfac80a
MC
1711 struct binder_transaction_data *tr, int reply,
1712 binder_size_t extra_buffers_size)
355b0502 1713{
a056af42 1714 int ret;
355b0502
GKH
1715 struct binder_transaction *t;
1716 struct binder_work *tcomplete;
7980240b 1717 binder_size_t *offp, *off_end, *off_start;
212265e5 1718 binder_size_t off_min;
7980240b 1719 u8 *sg_bufp, *sg_buf_end;
7a4408c6 1720 struct binder_proc *target_proc = NULL;
355b0502
GKH
1721 struct binder_thread *target_thread = NULL;
1722 struct binder_node *target_node = NULL;
1723 struct list_head *target_list;
1724 wait_queue_head_t *target_wait;
1725 struct binder_transaction *in_reply_to = NULL;
1726 struct binder_transaction_log_entry *e;
57ada2fb
TK
1727 uint32_t return_error = 0;
1728 uint32_t return_error_param = 0;
1729 uint32_t return_error_line = 0;
7980240b
MC
1730 struct binder_buffer_object *last_fixup_obj = NULL;
1731 binder_size_t last_fixup_min_off = 0;
342e5c90 1732 struct binder_context *context = proc->context;
d99c7333 1733 int t_debug_id = atomic_inc_return(&binder_last_id);
355b0502
GKH
1734
1735 e = binder_transaction_log_add(&binder_transaction_log);
d99c7333 1736 e->debug_id = t_debug_id;
355b0502
GKH
1737 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
1738 e->from_proc = proc->pid;
1739 e->from_thread = thread->pid;
1740 e->target_handle = tr->target.handle;
1741 e->data_size = tr->data_size;
1742 e->offsets_size = tr->offsets_size;
14db3181 1743 e->context_name = proc->context->name;
355b0502
GKH
1744
1745 if (reply) {
1746 in_reply_to = thread->transaction_stack;
1747 if (in_reply_to == NULL) {
56b468fc 1748 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
355b0502
GKH
1749 proc->pid, thread->pid);
1750 return_error = BR_FAILED_REPLY;
57ada2fb
TK
1751 return_error_param = -EPROTO;
1752 return_error_line = __LINE__;
355b0502
GKH
1753 goto err_empty_call_stack;
1754 }
1755 binder_set_nice(in_reply_to->saved_priority);
1756 if (in_reply_to->to_thread != thread) {
7a4408c6 1757 spin_lock(&in_reply_to->lock);
56b468fc 1758 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
355b0502
GKH
1759 proc->pid, thread->pid, in_reply_to->debug_id,
1760 in_reply_to->to_proc ?
1761 in_reply_to->to_proc->pid : 0,
1762 in_reply_to->to_thread ?
1763 in_reply_to->to_thread->pid : 0);
7a4408c6 1764 spin_unlock(&in_reply_to->lock);
355b0502 1765 return_error = BR_FAILED_REPLY;
57ada2fb
TK
1766 return_error_param = -EPROTO;
1767 return_error_line = __LINE__;
355b0502
GKH
1768 in_reply_to = NULL;
1769 goto err_bad_call_stack;
1770 }
1771 thread->transaction_stack = in_reply_to->to_parent;
7a4408c6 1772 target_thread = binder_get_txn_from(in_reply_to);
355b0502
GKH
1773 if (target_thread == NULL) {
1774 return_error = BR_DEAD_REPLY;
57ada2fb 1775 return_error_line = __LINE__;
355b0502
GKH
1776 goto err_dead_binder;
1777 }
1778 if (target_thread->transaction_stack != in_reply_to) {
56b468fc 1779 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
355b0502
GKH
1780 proc->pid, thread->pid,
1781 target_thread->transaction_stack ?
1782 target_thread->transaction_stack->debug_id : 0,
1783 in_reply_to->debug_id);
1784 return_error = BR_FAILED_REPLY;
57ada2fb
TK
1785 return_error_param = -EPROTO;
1786 return_error_line = __LINE__;
355b0502
GKH
1787 in_reply_to = NULL;
1788 target_thread = NULL;
1789 goto err_dead_binder;
1790 }
1791 target_proc = target_thread->proc;
7a4408c6 1792 target_proc->tmp_ref++;
355b0502
GKH
1793 } else {
1794 if (tr->target.handle) {
1795 struct binder_ref *ref;
10f62861 1796
eb34983b
TK
1797 /*
1798 * There must already be a strong ref
1799 * on this node. If so, do a strong
1800 * increment on the node to ensure it
1801 * stays alive until the transaction is
1802 * done.
1803 */
0a3ffab9 1804 ref = binder_get_ref(proc, tr->target.handle, true);
eb34983b
TK
1805 if (ref) {
1806 binder_inc_node(ref->node, 1, 0, NULL);
1807 target_node = ref->node;
1808 }
1809 if (target_node == NULL) {
56b468fc 1810 binder_user_error("%d:%d got transaction to invalid handle\n",
355b0502
GKH
1811 proc->pid, thread->pid);
1812 return_error = BR_FAILED_REPLY;
57ada2fb
TK
1813 return_error_param = -EINVAL;
1814 return_error_line = __LINE__;
355b0502
GKH
1815 goto err_invalid_target_handle;
1816 }
355b0502 1817 } else {
c44b1231 1818 mutex_lock(&context->context_mgr_node_lock);
342e5c90 1819 target_node = context->binder_context_mgr_node;
355b0502
GKH
1820 if (target_node == NULL) {
1821 return_error = BR_DEAD_REPLY;
c44b1231 1822 mutex_unlock(&context->context_mgr_node_lock);
57ada2fb 1823 return_error_line = __LINE__;
355b0502
GKH
1824 goto err_no_context_mgr_node;
1825 }
eb34983b 1826 binder_inc_node(target_node, 1, 0, NULL);
c44b1231 1827 mutex_unlock(&context->context_mgr_node_lock);
355b0502
GKH
1828 }
1829 e->to_node = target_node->debug_id;
1830 target_proc = target_node->proc;
1831 if (target_proc == NULL) {
1832 return_error = BR_DEAD_REPLY;
57ada2fb 1833 return_error_line = __LINE__;
355b0502
GKH
1834 goto err_dead_binder;
1835 }
7a4408c6 1836 target_proc->tmp_ref++;
79af7307
SS
1837 if (security_binder_transaction(proc->tsk,
1838 target_proc->tsk) < 0) {
1839 return_error = BR_FAILED_REPLY;
57ada2fb
TK
1840 return_error_param = -EPERM;
1841 return_error_line = __LINE__;
79af7307
SS
1842 goto err_invalid_target_handle;
1843 }
355b0502
GKH
1844 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
1845 struct binder_transaction *tmp;
10f62861 1846
355b0502
GKH
1847 tmp = thread->transaction_stack;
1848 if (tmp->to_thread != thread) {
7a4408c6 1849 spin_lock(&tmp->lock);
56b468fc 1850 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
355b0502
GKH
1851 proc->pid, thread->pid, tmp->debug_id,
1852 tmp->to_proc ? tmp->to_proc->pid : 0,
1853 tmp->to_thread ?
1854 tmp->to_thread->pid : 0);
7a4408c6 1855 spin_unlock(&tmp->lock);
355b0502 1856 return_error = BR_FAILED_REPLY;
57ada2fb
TK
1857 return_error_param = -EPROTO;
1858 return_error_line = __LINE__;
355b0502
GKH
1859 goto err_bad_call_stack;
1860 }
1861 while (tmp) {
7a4408c6
TK
1862 struct binder_thread *from;
1863
1864 spin_lock(&tmp->lock);
1865 from = tmp->from;
1866 if (from && from->proc == target_proc) {
1867 atomic_inc(&from->tmp_ref);
1868 target_thread = from;
1869 spin_unlock(&tmp->lock);
1870 break;
1871 }
1872 spin_unlock(&tmp->lock);
355b0502
GKH
1873 tmp = tmp->from_parent;
1874 }
1875 }
1876 }
1877 if (target_thread) {
1878 e->to_thread = target_thread->pid;
1879 target_list = &target_thread->todo;
1880 target_wait = &target_thread->wait;
1881 } else {
1882 target_list = &target_proc->todo;
1883 target_wait = &target_proc->wait;
1884 }
1885 e->to_proc = target_proc->pid;
1886
1887 /* TODO: reuse incoming transaction for reply */
1888 t = kzalloc(sizeof(*t), GFP_KERNEL);
1889 if (t == NULL) {
1890 return_error = BR_FAILED_REPLY;
57ada2fb
TK
1891 return_error_param = -ENOMEM;
1892 return_error_line = __LINE__;
355b0502
GKH
1893 goto err_alloc_t_failed;
1894 }
1895 binder_stats_created(BINDER_STAT_TRANSACTION);
7a4408c6 1896 spin_lock_init(&t->lock);
355b0502
GKH
1897
1898 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
1899 if (tcomplete == NULL) {
1900 return_error = BR_FAILED_REPLY;
57ada2fb
TK
1901 return_error_param = -ENOMEM;
1902 return_error_line = __LINE__;
355b0502
GKH
1903 goto err_alloc_tcomplete_failed;
1904 }
1905 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
1906
d99c7333 1907 t->debug_id = t_debug_id;
355b0502
GKH
1908
1909 if (reply)
1910 binder_debug(BINDER_DEBUG_TRANSACTION,
4bfac80a 1911 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
355b0502
GKH
1912 proc->pid, thread->pid, t->debug_id,
1913 target_proc->pid, target_thread->pid,
da49889d
AH
1914 (u64)tr->data.ptr.buffer,
1915 (u64)tr->data.ptr.offsets,
4bfac80a
MC
1916 (u64)tr->data_size, (u64)tr->offsets_size,
1917 (u64)extra_buffers_size);
355b0502
GKH
1918 else
1919 binder_debug(BINDER_DEBUG_TRANSACTION,
4bfac80a 1920 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
355b0502
GKH
1921 proc->pid, thread->pid, t->debug_id,
1922 target_proc->pid, target_node->debug_id,
da49889d
AH
1923 (u64)tr->data.ptr.buffer,
1924 (u64)tr->data.ptr.offsets,
4bfac80a
MC
1925 (u64)tr->data_size, (u64)tr->offsets_size,
1926 (u64)extra_buffers_size);
355b0502
GKH
1927
1928 if (!reply && !(tr->flags & TF_ONE_WAY))
1929 t->from = thread;
1930 else
1931 t->from = NULL;
57bab7cb 1932 t->sender_euid = task_euid(proc->tsk);
355b0502
GKH
1933 t->to_proc = target_proc;
1934 t->to_thread = target_thread;
1935 t->code = tr->code;
1936 t->flags = tr->flags;
1937 t->priority = task_nice(current);
975a1ac9
AH
1938
1939 trace_binder_transaction(reply, t, target_node);
1940
19c98724 1941 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
4bfac80a
MC
1942 tr->offsets_size, extra_buffers_size,
1943 !reply && (t->flags & TF_ONE_WAY));
57ada2fb
TK
1944 if (IS_ERR(t->buffer)) {
1945 /*
1946 * -ESRCH indicates VMA cleared. The target is dying.
1947 */
1948 return_error_param = PTR_ERR(t->buffer);
1949 return_error = return_error_param == -ESRCH ?
1950 BR_DEAD_REPLY : BR_FAILED_REPLY;
1951 return_error_line = __LINE__;
1952 t->buffer = NULL;
355b0502
GKH
1953 goto err_binder_alloc_buf_failed;
1954 }
1955 t->buffer->allow_user_free = 0;
1956 t->buffer->debug_id = t->debug_id;
1957 t->buffer->transaction = t;
1958 t->buffer->target_node = target_node;
975a1ac9 1959 trace_binder_transaction_alloc_buf(t->buffer);
7980240b
MC
1960 off_start = (binder_size_t *)(t->buffer->data +
1961 ALIGN(tr->data_size, sizeof(void *)));
1962 offp = off_start;
355b0502 1963
da49889d
AH
1964 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
1965 tr->data.ptr.buffer, tr->data_size)) {
56b468fc
AS
1966 binder_user_error("%d:%d got transaction with invalid data ptr\n",
1967 proc->pid, thread->pid);
355b0502 1968 return_error = BR_FAILED_REPLY;
57ada2fb
TK
1969 return_error_param = -EFAULT;
1970 return_error_line = __LINE__;
355b0502
GKH
1971 goto err_copy_data_failed;
1972 }
da49889d
AH
1973 if (copy_from_user(offp, (const void __user *)(uintptr_t)
1974 tr->data.ptr.offsets, tr->offsets_size)) {
56b468fc
AS
1975 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
1976 proc->pid, thread->pid);
355b0502 1977 return_error = BR_FAILED_REPLY;
57ada2fb
TK
1978 return_error_param = -EFAULT;
1979 return_error_line = __LINE__;
355b0502
GKH
1980 goto err_copy_data_failed;
1981 }
da49889d
AH
1982 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
1983 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
1984 proc->pid, thread->pid, (u64)tr->offsets_size);
355b0502 1985 return_error = BR_FAILED_REPLY;
57ada2fb
TK
1986 return_error_param = -EINVAL;
1987 return_error_line = __LINE__;
355b0502
GKH
1988 goto err_bad_offset;
1989 }
7980240b
MC
1990 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
1991 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
1992 proc->pid, thread->pid,
1993 (u64)extra_buffers_size);
1994 return_error = BR_FAILED_REPLY;
57ada2fb
TK
1995 return_error_param = -EINVAL;
1996 return_error_line = __LINE__;
7980240b
MC
1997 goto err_bad_offset;
1998 }
1999 off_end = (void *)off_start + tr->offsets_size;
2000 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
2001 sg_buf_end = sg_bufp + extra_buffers_size;
212265e5 2002 off_min = 0;
355b0502 2003 for (; offp < off_end; offp++) {
feba3900
MC
2004 struct binder_object_header *hdr;
2005 size_t object_size = binder_validate_object(t->buffer, *offp);
10f62861 2006
feba3900
MC
2007 if (object_size == 0 || *offp < off_min) {
2008 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
212265e5
AH
2009 proc->pid, thread->pid, (u64)*offp,
2010 (u64)off_min,
feba3900 2011 (u64)t->buffer->data_size);
355b0502 2012 return_error = BR_FAILED_REPLY;
57ada2fb
TK
2013 return_error_param = -EINVAL;
2014 return_error_line = __LINE__;
355b0502
GKH
2015 goto err_bad_offset;
2016 }
feba3900
MC
2017
2018 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
2019 off_min = *offp + object_size;
2020 switch (hdr->type) {
355b0502
GKH
2021 case BINDER_TYPE_BINDER:
2022 case BINDER_TYPE_WEAK_BINDER: {
feba3900 2023 struct flat_binder_object *fp;
10f62861 2024
feba3900 2025 fp = to_flat_binder_object(hdr);
a056af42
MC
2026 ret = binder_translate_binder(fp, t, thread);
2027 if (ret < 0) {
355b0502 2028 return_error = BR_FAILED_REPLY;
57ada2fb
TK
2029 return_error_param = ret;
2030 return_error_line = __LINE__;
a056af42 2031 goto err_translate_failed;
355b0502 2032 }
355b0502
GKH
2033 } break;
2034 case BINDER_TYPE_HANDLE:
2035 case BINDER_TYPE_WEAK_HANDLE: {
feba3900 2036 struct flat_binder_object *fp;
0a3ffab9 2037
feba3900 2038 fp = to_flat_binder_object(hdr);
a056af42
MC
2039 ret = binder_translate_handle(fp, t, thread);
2040 if (ret < 0) {
79af7307 2041 return_error = BR_FAILED_REPLY;
57ada2fb
TK
2042 return_error_param = ret;
2043 return_error_line = __LINE__;
a056af42 2044 goto err_translate_failed;
355b0502
GKH
2045 }
2046 } break;
2047
2048 case BINDER_TYPE_FD: {
feba3900 2049 struct binder_fd_object *fp = to_binder_fd_object(hdr);
a056af42
MC
2050 int target_fd = binder_translate_fd(fp->fd, t, thread,
2051 in_reply_to);
355b0502 2052
355b0502 2053 if (target_fd < 0) {
355b0502 2054 return_error = BR_FAILED_REPLY;
57ada2fb
TK
2055 return_error_param = target_fd;
2056 return_error_line = __LINE__;
a056af42 2057 goto err_translate_failed;
355b0502 2058 }
feba3900
MC
2059 fp->pad_binder = 0;
2060 fp->fd = target_fd;
355b0502 2061 } break;
def95c73
MC
2062 case BINDER_TYPE_FDA: {
2063 struct binder_fd_array_object *fda =
2064 to_binder_fd_array_object(hdr);
2065 struct binder_buffer_object *parent =
2066 binder_validate_ptr(t->buffer, fda->parent,
2067 off_start,
2068 offp - off_start);
2069 if (!parent) {
2070 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2071 proc->pid, thread->pid);
2072 return_error = BR_FAILED_REPLY;
57ada2fb
TK
2073 return_error_param = -EINVAL;
2074 return_error_line = __LINE__;
def95c73
MC
2075 goto err_bad_parent;
2076 }
2077 if (!binder_validate_fixup(t->buffer, off_start,
2078 parent, fda->parent_offset,
2079 last_fixup_obj,
2080 last_fixup_min_off)) {
2081 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2082 proc->pid, thread->pid);
2083 return_error = BR_FAILED_REPLY;
57ada2fb
TK
2084 return_error_param = -EINVAL;
2085 return_error_line = __LINE__;
def95c73
MC
2086 goto err_bad_parent;
2087 }
2088 ret = binder_translate_fd_array(fda, parent, t, thread,
2089 in_reply_to);
2090 if (ret < 0) {
2091 return_error = BR_FAILED_REPLY;
57ada2fb
TK
2092 return_error_param = ret;
2093 return_error_line = __LINE__;
def95c73
MC
2094 goto err_translate_failed;
2095 }
2096 last_fixup_obj = parent;
2097 last_fixup_min_off =
2098 fda->parent_offset + sizeof(u32) * fda->num_fds;
2099 } break;
7980240b
MC
2100 case BINDER_TYPE_PTR: {
2101 struct binder_buffer_object *bp =
2102 to_binder_buffer_object(hdr);
2103 size_t buf_left = sg_buf_end - sg_bufp;
2104
2105 if (bp->length > buf_left) {
2106 binder_user_error("%d:%d got transaction with too large buffer\n",
2107 proc->pid, thread->pid);
2108 return_error = BR_FAILED_REPLY;
57ada2fb
TK
2109 return_error_param = -EINVAL;
2110 return_error_line = __LINE__;
7980240b
MC
2111 goto err_bad_offset;
2112 }
2113 if (copy_from_user(sg_bufp,
2114 (const void __user *)(uintptr_t)
2115 bp->buffer, bp->length)) {
2116 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2117 proc->pid, thread->pid);
57ada2fb 2118 return_error_param = -EFAULT;
7980240b 2119 return_error = BR_FAILED_REPLY;
57ada2fb 2120 return_error_line = __LINE__;
7980240b
MC
2121 goto err_copy_data_failed;
2122 }
2123 /* Fixup buffer pointer to target proc address space */
2124 bp->buffer = (uintptr_t)sg_bufp +
19c98724
TK
2125 binder_alloc_get_user_buffer_offset(
2126 &target_proc->alloc);
7980240b
MC
2127 sg_bufp += ALIGN(bp->length, sizeof(u64));
2128
2129 ret = binder_fixup_parent(t, thread, bp, off_start,
2130 offp - off_start,
2131 last_fixup_obj,
2132 last_fixup_min_off);
2133 if (ret < 0) {
2134 return_error = BR_FAILED_REPLY;
57ada2fb
TK
2135 return_error_param = ret;
2136 return_error_line = __LINE__;
7980240b
MC
2137 goto err_translate_failed;
2138 }
2139 last_fixup_obj = bp;
2140 last_fixup_min_off = 0;
2141 } break;
355b0502 2142 default:
64dcfe6b 2143 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
feba3900 2144 proc->pid, thread->pid, hdr->type);
355b0502 2145 return_error = BR_FAILED_REPLY;
57ada2fb
TK
2146 return_error_param = -EINVAL;
2147 return_error_line = __LINE__;
355b0502
GKH
2148 goto err_bad_object_type;
2149 }
2150 }
ccae6f67
TK
2151 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
2152 list_add_tail(&tcomplete->entry, &thread->todo);
2153
355b0502 2154 if (reply) {
7a4408c6
TK
2155 if (target_thread->is_dead)
2156 goto err_dead_proc_or_thread;
355b0502
GKH
2157 BUG_ON(t->buffer->async_transaction != 0);
2158 binder_pop_transaction(target_thread, in_reply_to);
b6d282ce 2159 binder_free_transaction(in_reply_to);
355b0502
GKH
2160 } else if (!(t->flags & TF_ONE_WAY)) {
2161 BUG_ON(t->buffer->async_transaction != 0);
2162 t->need_reply = 1;
2163 t->from_parent = thread->transaction_stack;
2164 thread->transaction_stack = t;
7a4408c6
TK
2165 if (target_proc->is_dead ||
2166 (target_thread && target_thread->is_dead)) {
2167 binder_pop_transaction(thread, t);
2168 goto err_dead_proc_or_thread;
2169 }
355b0502
GKH
2170 } else {
2171 BUG_ON(target_node == NULL);
2172 BUG_ON(t->buffer->async_transaction != 1);
2173 if (target_node->has_async_transaction) {
2174 target_list = &target_node->async_todo;
2175 target_wait = NULL;
2176 } else
2177 target_node->has_async_transaction = 1;
7a4408c6
TK
2178 if (target_proc->is_dead ||
2179 (target_thread && target_thread->is_dead))
2180 goto err_dead_proc_or_thread;
355b0502
GKH
2181 }
2182 t->work.type = BINDER_WORK_TRANSACTION;
2183 list_add_tail(&t->work.entry, target_list);
00b40d61 2184 if (target_wait) {
ccae6f67 2185 if (reply || !(tr->flags & TF_ONE_WAY))
00b40d61
RA
2186 wake_up_interruptible_sync(target_wait);
2187 else
2188 wake_up_interruptible(target_wait);
2189 }
7a4408c6
TK
2190 if (target_thread)
2191 binder_thread_dec_tmpref(target_thread);
2192 binder_proc_dec_tmpref(target_proc);
d99c7333
TK
2193 /*
2194 * write barrier to synchronize with initialization
2195 * of log entry
2196 */
2197 smp_wmb();
2198 WRITE_ONCE(e->debug_id_done, t_debug_id);
355b0502
GKH
2199 return;
2200
7a4408c6
TK
2201err_dead_proc_or_thread:
2202 return_error = BR_DEAD_REPLY;
2203 return_error_line = __LINE__;
a056af42 2204err_translate_failed:
355b0502
GKH
2205err_bad_object_type:
2206err_bad_offset:
def95c73 2207err_bad_parent:
355b0502 2208err_copy_data_failed:
975a1ac9 2209 trace_binder_transaction_failed_buffer_release(t->buffer);
355b0502 2210 binder_transaction_buffer_release(target_proc, t->buffer, offp);
eb34983b 2211 target_node = NULL;
355b0502 2212 t->buffer->transaction = NULL;
19c98724 2213 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
355b0502
GKH
2214err_binder_alloc_buf_failed:
2215 kfree(tcomplete);
2216 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2217err_alloc_tcomplete_failed:
2218 kfree(t);
2219 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2220err_alloc_t_failed:
2221err_bad_call_stack:
2222err_empty_call_stack:
2223err_dead_binder:
2224err_invalid_target_handle:
2225err_no_context_mgr_node:
7a4408c6
TK
2226 if (target_thread)
2227 binder_thread_dec_tmpref(target_thread);
2228 if (target_proc)
2229 binder_proc_dec_tmpref(target_proc);
eb34983b
TK
2230 if (target_node)
2231 binder_dec_node(target_node, 1, 0);
2232
355b0502 2233 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
57ada2fb
TK
2234 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
2235 proc->pid, thread->pid, return_error, return_error_param,
2236 (u64)tr->data_size, (u64)tr->offsets_size,
2237 return_error_line);
355b0502
GKH
2238
2239 {
2240 struct binder_transaction_log_entry *fe;
10f62861 2241
57ada2fb
TK
2242 e->return_error = return_error;
2243 e->return_error_param = return_error_param;
2244 e->return_error_line = return_error_line;
355b0502
GKH
2245 fe = binder_transaction_log_add(&binder_transaction_log_failed);
2246 *fe = *e;
d99c7333
TK
2247 /*
2248 * write barrier to synchronize with initialization
2249 * of log entry
2250 */
2251 smp_wmb();
2252 WRITE_ONCE(e->debug_id_done, t_debug_id);
2253 WRITE_ONCE(fe->debug_id_done, t_debug_id);
355b0502
GKH
2254 }
2255
26549d17 2256 BUG_ON(thread->return_error.cmd != BR_OK);
355b0502 2257 if (in_reply_to) {
26549d17
TK
2258 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
2259 list_add_tail(&thread->return_error.work.entry,
2260 &thread->todo);
355b0502 2261 binder_send_failed_reply(in_reply_to, return_error);
26549d17
TK
2262 } else {
2263 thread->return_error.cmd = return_error;
2264 list_add_tail(&thread->return_error.work.entry,
2265 &thread->todo);
2266 }
355b0502
GKH
2267}
2268
fb07ebc3
BP
/**
 * binder_thread_write() - consume BC_* commands from a userspace buffer
 * @proc:          calling process
 * @thread:        calling thread within @proc
 * @binder_buffer: userspace address of the command stream
 * @size:          total bytes available in the stream
 * @consumed:      in: bytes already processed; out: bytes processed so far
 *
 * Parses and executes the BC_* command stream supplied through the
 * BINDER_WRITE_READ ioctl.  Processing stops early if a command leaves a
 * pending error in thread->return_error (it is delivered to userspace on
 * the next read).  Returns 0 on success, -EFAULT when a user pointer
 * cannot be read, or -EINVAL for unknown/unsupported commands.
 */
static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	/* Stop dispatching as soon as an error is queued for this thread. */
	while (ptr < end && thread->return_error.cmd == BR_OK) {
		int ret;

		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		/* Bump per-command counters: global, per-proc, per-thread. */
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			const char *debug_string;
			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
			struct binder_ref_data rdata;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(uint32_t);
			ret = -1;
			/*
			 * Handle 0 refers to the context manager; an incref
			 * on it may need to create the ref, which is done
			 * under context_mgr_node_lock.
			 */
			if (increment && !target) {
				struct binder_node *ctx_mgr_node;

				mutex_lock(&context->context_mgr_node_lock);
				ctx_mgr_node = context->binder_context_mgr_node;
				if (ctx_mgr_node)
					ret = binder_inc_ref_for_node(
						proc, ctx_mgr_node,
						strong, NULL, &rdata);
				mutex_unlock(&context->context_mgr_node_lock);
			}
			/* Fall back to updating an existing ref by handle. */
			if (ret)
				ret = binder_update_ref_for_handle(
					proc, target, increment, strong,
					&rdata);
			if (!ret && rdata.desc != target) {
				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
					proc->pid, thread->pid,
					target, rdata.desc);
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				break;
			case BC_RELEASE:
				debug_string = "Release";
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				break;
			}
			if (ret) {
				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
					proc->pid, thread->pid, debug_string,
					strong, target, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d\n",
				     proc->pid, thread->pid, debug_string,
				     rdata.debug_id, rdata.desc, rdata.strong,
				     rdata.weak);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" :
					"BC_ACQUIRE_DONE",
					(u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					(u64)node_ptr, node->debug_id,
					(u64)cookie, (u64)node->cookie);
				break;
			}
			/*
			 * Userspace acknowledges the ref it was asked to
			 * take; clear the matching pending_* flag and drop
			 * the temporary ref held while waiting for the ack.
			 */
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					break;
				}
				node->pending_weak_ref = 0;
			}
			binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs, node->local_weak_refs);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_alloc_prepare_to_free(&proc->alloc,
							      data_ptr);
			if (buffer == NULL) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			if (!buffer->allow_user_free) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");

			/* Detach from any still-active transaction. */
			if (buffer->transaction) {
				buffer->transaction->buffer = NULL;
				buffer->transaction = NULL;
			}
			/*
			 * Freeing an async buffer releases the node's
			 * async-serialization slot: run the next queued
			 * async item, or clear the flag if none remains.
			 */
			if (buffer->async_transaction && buffer->target_node) {
				BUG_ON(!buffer->target_node->has_async_transaction);
				if (list_empty(&buffer->target_node->async_todo))
					buffer->target_node->has_async_transaction = 0;
				else
					list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
			}
			trace_binder_transaction_buffer_release(buffer);
			binder_transaction_buffer_release(proc, buffer, NULL);
			binder_alloc_free_buf(&proc->alloc, buffer);
			break;
		}

		case BC_TRANSACTION_SG:
		case BC_REPLY_SG: {
			struct binder_transaction_data_sg tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr.transaction_data,
					   cmd == BC_REPLY_SG, tr.buffers_size);
			break;
		}
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}

		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			/*
			 * Registered loopers are spawned on the driver's
			 * request; reject threads that already entered the
			 * loop on their own or were never requested.
			 */
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			ref = binder_get_ref(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->data.debug_id,
				     ref->data.desc, ref->data.strong,
				     ref->data.weak, ref->node->debug_id);

			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/* Only one death notification per ref. */
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
						proc->pid, thread->pid);
					break;
				}
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					WARN_ON(thread->return_error.cmd !=
						BR_OK);
					thread->return_error.cmd = BR_ERROR;
					list_add_tail(
					    &thread->return_error.work.entry,
					    &thread->todo);
					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
						     "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						     proc->pid, thread->pid);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				/*
				 * Target already dead: deliver the death
				 * notification immediately, preferring this
				 * thread if it is a looper.
				 */
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
						list_add_tail(&ref->death->work.entry, &thread->todo);
					} else {
						list_add_tail(&ref->death->work.entry, &proc->todo);
						wake_up_interruptible(&proc->wait);
					}
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
						proc->pid, thread->pid);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
						proc->pid, thread->pid,
						(u64)death->cookie,
						(u64)cookie);
					break;
				}
				ref->death = NULL;
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
						list_add_tail(&death->work.entry, &thread->todo);
					} else {
						list_add_tail(&death->work.entry, &proc->todo);
						wake_up_interruptible(&proc->wait);
					}
				} else {
					/*
					 * Death already queued but not yet
					 * consumed: morph it so the reader
					 * also performs the clear.
					 */
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
			}
		} break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			/* Match the ack against a delivered death record. */
			list_for_each_entry(w, &proc->delivered_death, entry) {
				struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
					proc->pid, thread->pid, (u64)cookie);
				break;
			}

			list_del_init(&death->work.entry);
			/* A clear arrived while the death was in flight. */
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
					list_add_tail(&death->work.entry, &thread->todo);
				} else {
					list_add_tail(&death->work.entry, &proc->todo);
					wake_up_interruptible(&proc->wait);
				}
			}
		} break;

		default:
			pr_err("%d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		/* Commit progress after each fully-handled command. */
		*consumed = ptr - buffer;
	}
	return 0;
}
2663
fb07ebc3
BP
2664static void binder_stat_br(struct binder_proc *proc,
2665 struct binder_thread *thread, uint32_t cmd)
355b0502 2666{
975a1ac9 2667 trace_binder_return(cmd);
355b0502 2668 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
0953c797
BJS
2669 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
2670 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
2671 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
355b0502
GKH
2672 }
2673}
2674
2675static int binder_has_proc_work(struct binder_proc *proc,
2676 struct binder_thread *thread)
2677{
08dabcee 2678 return !list_empty(&proc->todo) || thread->looper_need_return;
355b0502
GKH
2679}
2680
2681static int binder_has_thread_work(struct binder_thread *thread)
2682{
26549d17 2683 return !list_empty(&thread->todo) || thread->looper_need_return;
355b0502
GKH
2684}
2685
26b47d8a
TK
2686static int binder_put_node_cmd(struct binder_proc *proc,
2687 struct binder_thread *thread,
2688 void __user **ptrp,
2689 binder_uintptr_t node_ptr,
2690 binder_uintptr_t node_cookie,
2691 int node_debug_id,
2692 uint32_t cmd, const char *cmd_name)
2693{
2694 void __user *ptr = *ptrp;
2695
2696 if (put_user(cmd, (uint32_t __user *)ptr))
2697 return -EFAULT;
2698 ptr += sizeof(uint32_t);
2699
2700 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
2701 return -EFAULT;
2702 ptr += sizeof(binder_uintptr_t);
2703
2704 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
2705 return -EFAULT;
2706 ptr += sizeof(binder_uintptr_t);
2707
2708 binder_stat_br(proc, thread, cmd);
2709 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
2710 proc->pid, thread->pid, cmd_name, node_debug_id,
2711 (u64)node_ptr, (u64)node_cookie);
2712
2713 *ptrp = ptr;
2714 return 0;
2715}
2716
355b0502
GKH
2717static int binder_thread_read(struct binder_proc *proc,
2718 struct binder_thread *thread,
da49889d
AH
2719 binder_uintptr_t binder_buffer, size_t size,
2720 binder_size_t *consumed, int non_block)
355b0502 2721{
da49889d 2722 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
355b0502
GKH
2723 void __user *ptr = buffer + *consumed;
2724 void __user *end = buffer + size;
2725
2726 int ret = 0;
2727 int wait_for_proc_work;
2728
2729 if (*consumed == 0) {
2730 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
2731 return -EFAULT;
2732 ptr += sizeof(uint32_t);
2733 }
2734
2735retry:
2736 wait_for_proc_work = thread->transaction_stack == NULL &&
2737 list_empty(&thread->todo);
2738
355b0502
GKH
2739 thread->looper |= BINDER_LOOPER_STATE_WAITING;
2740 if (wait_for_proc_work)
2741 proc->ready_threads++;
975a1ac9
AH
2742
2743 binder_unlock(__func__);
2744
2745 trace_binder_wait_for_work(wait_for_proc_work,
2746 !!thread->transaction_stack,
2747 !list_empty(&thread->todo));
355b0502
GKH
2748 if (wait_for_proc_work) {
2749 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
2750 BINDER_LOOPER_STATE_ENTERED))) {
56b468fc 2751 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
355b0502
GKH
2752 proc->pid, thread->pid, thread->looper);
2753 wait_event_interruptible(binder_user_error_wait,
2754 binder_stop_on_user_error < 2);
2755 }
2756 binder_set_nice(proc->default_priority);
2757 if (non_block) {
2758 if (!binder_has_proc_work(proc, thread))
2759 ret = -EAGAIN;
2760 } else
e2610b26 2761 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
355b0502
GKH
2762 } else {
2763 if (non_block) {
2764 if (!binder_has_thread_work(thread))
2765 ret = -EAGAIN;
2766 } else
e2610b26 2767 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
355b0502 2768 }
975a1ac9
AH
2769
2770 binder_lock(__func__);
2771
355b0502
GKH
2772 if (wait_for_proc_work)
2773 proc->ready_threads--;
2774 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
2775
2776 if (ret)
2777 return ret;
2778
2779 while (1) {
2780 uint32_t cmd;
2781 struct binder_transaction_data tr;
2782 struct binder_work *w;
2783 struct binder_transaction *t = NULL;
7a4408c6 2784 struct binder_thread *t_from;
355b0502 2785
395262a9
DV
2786 if (!list_empty(&thread->todo)) {
2787 w = list_first_entry(&thread->todo, struct binder_work,
2788 entry);
2789 } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
2790 w = list_first_entry(&proc->todo, struct binder_work,
2791 entry);
2792 } else {
2793 /* no data added */
08dabcee 2794 if (ptr - buffer == 4 && !thread->looper_need_return)
355b0502
GKH
2795 goto retry;
2796 break;
2797 }
2798
2799 if (end - ptr < sizeof(tr) + 4)
2800 break;
2801
2802 switch (w->type) {
2803 case BINDER_WORK_TRANSACTION: {
2804 t = container_of(w, struct binder_transaction, work);
2805 } break;
26549d17
TK
2806 case BINDER_WORK_RETURN_ERROR: {
2807 struct binder_error *e = container_of(
2808 w, struct binder_error, work);
2809
2810 WARN_ON(e->cmd == BR_OK);
2811 if (put_user(e->cmd, (uint32_t __user *)ptr))
2812 return -EFAULT;
2813 e->cmd = BR_OK;
2814 ptr += sizeof(uint32_t);
2815
2816 binder_stat_br(proc, thread, cmd);
2817 list_del(&w->entry);
2818 } break;
355b0502
GKH
2819 case BINDER_WORK_TRANSACTION_COMPLETE: {
2820 cmd = BR_TRANSACTION_COMPLETE;
2821 if (put_user(cmd, (uint32_t __user *)ptr))
2822 return -EFAULT;
2823 ptr += sizeof(uint32_t);
2824
2825 binder_stat_br(proc, thread, cmd);
2826 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
56b468fc 2827 "%d:%d BR_TRANSACTION_COMPLETE\n",
355b0502
GKH
2828 proc->pid, thread->pid);
2829
2830 list_del(&w->entry);
2831 kfree(w);
2832 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2833 } break;
2834 case BINDER_WORK_NODE: {
2835 struct binder_node *node = container_of(w, struct binder_node, work);
26b47d8a
TK
2836 int strong, weak;
2837 binder_uintptr_t node_ptr = node->ptr;
2838 binder_uintptr_t node_cookie = node->cookie;
2839 int node_debug_id = node->debug_id;
2840 int has_weak_ref;
2841 int has_strong_ref;
2842 void __user *orig_ptr = ptr;
2843
2844 BUG_ON(proc != node->proc);
2845 strong = node->internal_strong_refs ||
2846 node->local_strong_refs;
2847 weak = !hlist_empty(&node->refs) ||
2848 node->local_weak_refs || strong;
2849 has_strong_ref = node->has_strong_ref;
2850 has_weak_ref = node->has_weak_ref;
2851
2852 if (weak && !has_weak_ref) {
355b0502
GKH
2853 node->has_weak_ref = 1;
2854 node->pending_weak_ref = 1;
2855 node->local_weak_refs++;
26b47d8a
TK
2856 }
2857 if (strong && !has_strong_ref) {
355b0502
GKH
2858 node->has_strong_ref = 1;
2859 node->pending_strong_ref = 1;
2860 node->local_strong_refs++;
26b47d8a
TK
2861 }
2862 if (!strong && has_strong_ref)
355b0502 2863 node->has_strong_ref = 0;
26b47d8a 2864 if (!weak && has_weak_ref)
355b0502 2865 node->has_weak_ref = 0;
26b47d8a
TK
2866 list_del(&w->entry);
2867
2868 if (!weak && !strong) {
2869 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2870 "%d:%d node %d u%016llx c%016llx deleted\n",
2871 proc->pid, thread->pid,
2872 node_debug_id,
2873 (u64)node_ptr,
2874 (u64)node_cookie);
2875 rb_erase(&node->rb_node, &proc->nodes);
2876 kfree(node);
2877 binder_stats_deleted(BINDER_STAT_NODE);
355b0502 2878 }
26b47d8a
TK
2879 if (weak && !has_weak_ref)
2880 ret = binder_put_node_cmd(
2881 proc, thread, &ptr, node_ptr,
2882 node_cookie, node_debug_id,
2883 BR_INCREFS, "BR_INCREFS");
2884 if (!ret && strong && !has_strong_ref)
2885 ret = binder_put_node_cmd(
2886 proc, thread, &ptr, node_ptr,
2887 node_cookie, node_debug_id,
2888 BR_ACQUIRE, "BR_ACQUIRE");
2889 if (!ret && !strong && has_strong_ref)
2890 ret = binder_put_node_cmd(
2891 proc, thread, &ptr, node_ptr,
2892 node_cookie, node_debug_id,
2893 BR_RELEASE, "BR_RELEASE");
2894 if (!ret && !weak && has_weak_ref)
2895 ret = binder_put_node_cmd(
2896 proc, thread, &ptr, node_ptr,
2897 node_cookie, node_debug_id,
2898 BR_DECREFS, "BR_DECREFS");
2899 if (orig_ptr == ptr)
2900 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
2901 "%d:%d node %d u%016llx c%016llx state unchanged\n",
2902 proc->pid, thread->pid,
2903 node_debug_id,
2904 (u64)node_ptr,
2905 (u64)node_cookie);
2906 if (ret)
2907 return ret;
355b0502
GKH
2908 } break;
2909 case BINDER_WORK_DEAD_BINDER:
2910 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
2911 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
2912 struct binder_ref_death *death;
2913 uint32_t cmd;
2914
2915 death = container_of(w, struct binder_ref_death, work);
2916 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
2917 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
2918 else
2919 cmd = BR_DEAD_BINDER;
2920 if (put_user(cmd, (uint32_t __user *)ptr))
2921 return -EFAULT;
2922 ptr += sizeof(uint32_t);
da49889d
AH
2923 if (put_user(death->cookie,
2924 (binder_uintptr_t __user *)ptr))
355b0502 2925 return -EFAULT;
da49889d 2926 ptr += sizeof(binder_uintptr_t);
89334ab4 2927 binder_stat_br(proc, thread, cmd);
355b0502 2928 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
da49889d 2929 "%d:%d %s %016llx\n",
355b0502
GKH
2930 proc->pid, thread->pid,
2931 cmd == BR_DEAD_BINDER ?
2932 "BR_DEAD_BINDER" :
2933 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
da49889d 2934 (u64)death->cookie);
355b0502
GKH
2935
2936 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
2937 list_del(&w->entry);
2938 kfree(death);
2939 binder_stats_deleted(BINDER_STAT_DEATH);
2940 } else
2941 list_move(&w->entry, &proc->delivered_death);
2942 if (cmd == BR_DEAD_BINDER)
2943 goto done; /* DEAD_BINDER notifications can cause transactions */
2944 } break;
2945 }
2946
2947 if (!t)
2948 continue;
2949
2950 BUG_ON(t->buffer == NULL);
2951 if (t->buffer->target_node) {
2952 struct binder_node *target_node = t->buffer->target_node;
10f62861 2953
355b0502
GKH
2954 tr.target.ptr = target_node->ptr;
2955 tr.cookie = target_node->cookie;
2956 t->saved_priority = task_nice(current);
2957 if (t->priority < target_node->min_priority &&
2958 !(t->flags & TF_ONE_WAY))
2959 binder_set_nice(t->priority);
2960 else if (!(t->flags & TF_ONE_WAY) ||
2961 t->saved_priority > target_node->min_priority)
2962 binder_set_nice(target_node->min_priority);
2963 cmd = BR_TRANSACTION;
2964 } else {
da49889d
AH
2965 tr.target.ptr = 0;
2966 tr.cookie = 0;
355b0502
GKH
2967 cmd = BR_REPLY;
2968 }
2969 tr.code = t->code;
2970 tr.flags = t->flags;
4a2ebb93 2971 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
355b0502 2972
7a4408c6
TK
2973 t_from = binder_get_txn_from(t);
2974 if (t_from) {
2975 struct task_struct *sender = t_from->proc->tsk;
10f62861 2976
355b0502 2977 tr.sender_pid = task_tgid_nr_ns(sender,
17cf22c3 2978 task_active_pid_ns(current));
355b0502
GKH
2979 } else {
2980 tr.sender_pid = 0;
2981 }
2982
2983 tr.data_size = t->buffer->data_size;
2984 tr.offsets_size = t->buffer->offsets_size;
19c98724
TK
2985 tr.data.ptr.buffer = (binder_uintptr_t)
2986 ((uintptr_t)t->buffer->data +
2987 binder_alloc_get_user_buffer_offset(&proc->alloc));
355b0502
GKH
2988 tr.data.ptr.offsets = tr.data.ptr.buffer +
2989 ALIGN(t->buffer->data_size,
2990 sizeof(void *));
2991
7a4408c6
TK
2992 if (put_user(cmd, (uint32_t __user *)ptr)) {
2993 if (t_from)
2994 binder_thread_dec_tmpref(t_from);
355b0502 2995 return -EFAULT;
7a4408c6 2996 }
355b0502 2997 ptr += sizeof(uint32_t);
7a4408c6
TK
2998 if (copy_to_user(ptr, &tr, sizeof(tr))) {
2999 if (t_from)
3000 binder_thread_dec_tmpref(t_from);
355b0502 3001 return -EFAULT;
7a4408c6 3002 }
355b0502
GKH
3003 ptr += sizeof(tr);
3004
975a1ac9 3005 trace_binder_transaction_received(t);
355b0502
GKH
3006 binder_stat_br(proc, thread, cmd);
3007 binder_debug(BINDER_DEBUG_TRANSACTION,
da49889d 3008 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
355b0502
GKH
3009 proc->pid, thread->pid,
3010 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
3011 "BR_REPLY",
7a4408c6
TK
3012 t->debug_id, t_from ? t_from->proc->pid : 0,
3013 t_from ? t_from->pid : 0, cmd,
355b0502 3014 t->buffer->data_size, t->buffer->offsets_size,
da49889d 3015 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
355b0502 3016
7a4408c6
TK
3017 if (t_from)
3018 binder_thread_dec_tmpref(t_from);
355b0502
GKH
3019 list_del(&t->work.entry);
3020 t->buffer->allow_user_free = 1;
3021 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
3022 t->to_parent = thread->transaction_stack;
3023 t->to_thread = thread;
3024 thread->transaction_stack = t;
3025 } else {
b6d282ce 3026 binder_free_transaction(t);
355b0502
GKH
3027 }
3028 break;
3029 }
3030
3031done:
3032
3033 *consumed = ptr - buffer;
3034 if (proc->requested_threads + proc->ready_threads == 0 &&
3035 proc->requested_threads_started < proc->max_threads &&
3036 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3037 BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
3038 /*spawn a new thread if we leave this out */) {
3039 proc->requested_threads++;
3040 binder_debug(BINDER_DEBUG_THREADS,
56b468fc 3041 "%d:%d BR_SPAWN_LOOPER\n",
355b0502
GKH
3042 proc->pid, thread->pid);
3043 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
3044 return -EFAULT;
89334ab4 3045 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
355b0502
GKH
3046 }
3047 return 0;
3048}
3049
3050static void binder_release_work(struct list_head *list)
3051{
3052 struct binder_work *w;
10f62861 3053
355b0502
GKH
3054 while (!list_empty(list)) {
3055 w = list_first_entry(list, struct binder_work, entry);
3056 list_del_init(&w->entry);
3057 switch (w->type) {
3058 case BINDER_WORK_TRANSACTION: {
3059 struct binder_transaction *t;
3060
3061 t = container_of(w, struct binder_transaction, work);
675d66b0
AH
3062 if (t->buffer->target_node &&
3063 !(t->flags & TF_ONE_WAY)) {
355b0502 3064 binder_send_failed_reply(t, BR_DEAD_REPLY);
675d66b0
AH
3065 } else {
3066 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
56b468fc 3067 "undelivered transaction %d\n",
675d66b0 3068 t->debug_id);
b6d282ce 3069 binder_free_transaction(t);
675d66b0 3070 }
355b0502 3071 } break;
26549d17
TK
3072 case BINDER_WORK_RETURN_ERROR: {
3073 struct binder_error *e = container_of(
3074 w, struct binder_error, work);
3075
3076 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
3077 "undelivered TRANSACTION_ERROR: %u\n",
3078 e->cmd);
3079 } break;
355b0502 3080 case BINDER_WORK_TRANSACTION_COMPLETE: {
675d66b0 3081 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
56b468fc 3082 "undelivered TRANSACTION_COMPLETE\n");
355b0502
GKH
3083 kfree(w);
3084 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3085 } break;
675d66b0
AH
3086 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3087 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
3088 struct binder_ref_death *death;
3089
3090 death = container_of(w, struct binder_ref_death, work);
3091 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
da49889d
AH
3092 "undelivered death notification, %016llx\n",
3093 (u64)death->cookie);
675d66b0
AH
3094 kfree(death);
3095 binder_stats_deleted(BINDER_STAT_DEATH);
3096 } break;
355b0502 3097 default:
56b468fc 3098 pr_err("unexpected work type, %d, not freed\n",
675d66b0 3099 w->type);
355b0502
GKH
3100 break;
3101 }
3102 }
3103
3104}
3105
3106static struct binder_thread *binder_get_thread(struct binder_proc *proc)
3107{
3108 struct binder_thread *thread = NULL;
3109 struct rb_node *parent = NULL;
3110 struct rb_node **p = &proc->threads.rb_node;
3111
3112 while (*p) {
3113 parent = *p;
3114 thread = rb_entry(parent, struct binder_thread, rb_node);
3115
3116 if (current->pid < thread->pid)
3117 p = &(*p)->rb_left;
3118 else if (current->pid > thread->pid)
3119 p = &(*p)->rb_right;
3120 else
3121 break;
3122 }
3123 if (*p == NULL) {
3124 thread = kzalloc(sizeof(*thread), GFP_KERNEL);
3125 if (thread == NULL)
3126 return NULL;
3127 binder_stats_created(BINDER_STAT_THREAD);
3128 thread->proc = proc;
3129 thread->pid = current->pid;
7a4408c6 3130 atomic_set(&thread->tmp_ref, 0);
355b0502
GKH
3131 init_waitqueue_head(&thread->wait);
3132 INIT_LIST_HEAD(&thread->todo);
3133 rb_link_node(&thread->rb_node, parent, p);
3134 rb_insert_color(&thread->rb_node, &proc->threads);
08dabcee 3135 thread->looper_need_return = true;
26549d17
TK
3136 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
3137 thread->return_error.cmd = BR_OK;
3138 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
3139 thread->reply_error.cmd = BR_OK;
355b0502
GKH
3140 }
3141 return thread;
3142}
3143
7a4408c6
TK
3144static void binder_free_proc(struct binder_proc *proc)
3145{
3146 BUG_ON(!list_empty(&proc->todo));
3147 BUG_ON(!list_empty(&proc->delivered_death));
3148 binder_alloc_deferred_release(&proc->alloc);
3149 put_task_struct(proc->tsk);
3150 binder_stats_deleted(BINDER_STAT_PROC);
3151 kfree(proc);
3152}
3153
3154static void binder_free_thread(struct binder_thread *thread)
3155{
3156 BUG_ON(!list_empty(&thread->todo));
3157 binder_stats_deleted(BINDER_STAT_THREAD);
3158 binder_proc_dec_tmpref(thread->proc);
3159 kfree(thread);
3160}
3161
3162static int binder_thread_release(struct binder_proc *proc,
3163 struct binder_thread *thread)
355b0502
GKH
3164{
3165 struct binder_transaction *t;
3166 struct binder_transaction *send_reply = NULL;
3167 int active_transactions = 0;
7a4408c6 3168 struct binder_transaction *last_t = NULL;
355b0502 3169
7a4408c6
TK
3170 /*
3171 * take a ref on the proc so it survives
3172 * after we remove this thread from proc->threads.
3173 * The corresponding dec is when we actually
3174 * free the thread in binder_free_thread()
3175 */
3176 proc->tmp_ref++;
3177 /*
3178 * take a ref on this thread to ensure it
3179 * survives while we are releasing it
3180 */
3181 atomic_inc(&thread->tmp_ref);
355b0502
GKH
3182 rb_erase(&thread->rb_node, &proc->threads);
3183 t = thread->transaction_stack;
7a4408c6
TK
3184 if (t) {
3185 spin_lock(&t->lock);
3186 if (t->to_thread == thread)
3187 send_reply = t;
3188 }
3189 thread->is_dead = true;
3190
355b0502 3191 while (t) {
7a4408c6 3192 last_t = t;
355b0502
GKH
3193 active_transactions++;
3194 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
56b468fc
AS
3195 "release %d:%d transaction %d %s, still active\n",
3196 proc->pid, thread->pid,
355b0502
GKH
3197 t->debug_id,
3198 (t->to_thread == thread) ? "in" : "out");
3199
3200 if (t->to_thread == thread) {
3201 t->to_proc = NULL;
3202 t->to_thread = NULL;
3203 if (t->buffer) {
3204 t->buffer->transaction = NULL;
3205 t->buffer = NULL;
3206 }
3207 t = t->to_parent;
3208 } else if (t->from == thread) {
3209 t->from = NULL;
3210 t = t->from_parent;
3211 } else
3212 BUG();
7a4408c6
TK
3213 spin_unlock(&last_t->lock);
3214 if (t)
3215 spin_lock(&t->lock);
355b0502 3216 }
7a4408c6 3217
355b0502
GKH
3218 if (send_reply)
3219 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
3220 binder_release_work(&thread->todo);
7a4408c6 3221 binder_thread_dec_tmpref(thread);
355b0502
GKH
3222 return active_transactions;
3223}
3224
3225static unsigned int binder_poll(struct file *filp,
3226 struct poll_table_struct *wait)
3227{
3228 struct binder_proc *proc = filp->private_data;
3229 struct binder_thread *thread = NULL;
3230 int wait_for_proc_work;
3231
975a1ac9
AH
3232 binder_lock(__func__);
3233
355b0502
GKH
3234 thread = binder_get_thread(proc);
3235
3236 wait_for_proc_work = thread->transaction_stack == NULL &&
26549d17 3237 list_empty(&thread->todo);
975a1ac9
AH
3238
3239 binder_unlock(__func__);
355b0502
GKH
3240
3241 if (wait_for_proc_work) {
3242 if (binder_has_proc_work(proc, thread))
3243 return POLLIN;
3244 poll_wait(filp, &proc->wait, wait);
3245 if (binder_has_proc_work(proc, thread))
3246 return POLLIN;
3247 } else {
3248 if (binder_has_thread_work(thread))
3249 return POLLIN;
3250 poll_wait(filp, &thread->wait, wait);
3251 if (binder_has_thread_work(thread))
3252 return POLLIN;
3253 }
3254 return 0;
3255}
3256
78260ac6
TR
3257static int binder_ioctl_write_read(struct file *filp,
3258 unsigned int cmd, unsigned long arg,
3259 struct binder_thread *thread)
3260{
3261 int ret = 0;
3262 struct binder_proc *proc = filp->private_data;
3263 unsigned int size = _IOC_SIZE(cmd);
3264 void __user *ubuf = (void __user *)arg;
3265 struct binder_write_read bwr;
3266
3267 if (size != sizeof(struct binder_write_read)) {
3268 ret = -EINVAL;
3269 goto out;
3270 }
3271 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
3272 ret = -EFAULT;
3273 goto out;
3274 }
3275 binder_debug(BINDER_DEBUG_READ_WRITE,
3276 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
3277 proc->pid, thread->pid,
3278 (u64)bwr.write_size, (u64)bwr.write_buffer,
3279 (u64)bwr.read_size, (u64)bwr.read_buffer);
3280
3281 if (bwr.write_size > 0) {
3282 ret = binder_thread_write(proc, thread,
3283 bwr.write_buffer,
3284 bwr.write_size,
3285 &bwr.write_consumed);
3286 trace_binder_write_done(ret);
3287 if (ret < 0) {
3288 bwr.read_consumed = 0;
3289 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
3290 ret = -EFAULT;
3291 goto out;
3292 }
3293 }
3294 if (bwr.read_size > 0) {
3295 ret = binder_thread_read(proc, thread, bwr.read_buffer,
3296 bwr.read_size,
3297 &bwr.read_consumed,
3298 filp->f_flags & O_NONBLOCK);
3299 trace_binder_read_done(ret);
3300 if (!list_empty(&proc->todo))
3301 wake_up_interruptible(&proc->wait);
3302 if (ret < 0) {
3303 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
3304 ret = -EFAULT;
3305 goto out;
3306 }
3307 }
3308 binder_debug(BINDER_DEBUG_READ_WRITE,
3309 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
3310 proc->pid, thread->pid,
3311 (u64)bwr.write_consumed, (u64)bwr.write_size,
3312 (u64)bwr.read_consumed, (u64)bwr.read_size);
3313 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
3314 ret = -EFAULT;
3315 goto out;
3316 }
3317out:
3318 return ret;
3319}
3320
3321static int binder_ioctl_set_ctx_mgr(struct file *filp)
3322{
3323 int ret = 0;
3324 struct binder_proc *proc = filp->private_data;
342e5c90 3325 struct binder_context *context = proc->context;
c44b1231 3326 struct binder_node *new_node;
78260ac6
TR
3327 kuid_t curr_euid = current_euid();
3328
c44b1231 3329 mutex_lock(&context->context_mgr_node_lock);
342e5c90 3330 if (context->binder_context_mgr_node) {
78260ac6
TR
3331 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
3332 ret = -EBUSY;
3333 goto out;
3334 }
79af7307
SS
3335 ret = security_binder_set_context_mgr(proc->tsk);
3336 if (ret < 0)
3337 goto out;
342e5c90
MC
3338 if (uid_valid(context->binder_context_mgr_uid)) {
3339 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
78260ac6
TR
3340 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
3341 from_kuid(&init_user_ns, curr_euid),
3342 from_kuid(&init_user_ns,
342e5c90 3343 context->binder_context_mgr_uid));
78260ac6
TR
3344 ret = -EPERM;
3345 goto out;
3346 }
3347 } else {
342e5c90 3348 context->binder_context_mgr_uid = curr_euid;
78260ac6 3349 }
c44b1231
TK
3350 new_node = binder_new_node(proc, 0, 0);
3351 if (!new_node) {
78260ac6
TR
3352 ret = -ENOMEM;
3353 goto out;
3354 }
c44b1231
TK
3355 new_node->local_weak_refs++;
3356 new_node->local_strong_refs++;
3357 new_node->has_strong_ref = 1;
3358 new_node->has_weak_ref = 1;
3359 context->binder_context_mgr_node = new_node;
78260ac6 3360out:
c44b1231 3361 mutex_unlock(&context->context_mgr_node_lock);
78260ac6
TR
3362 return ret;
3363}
3364
355b0502
GKH
3365static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3366{
3367 int ret;
3368 struct binder_proc *proc = filp->private_data;
3369 struct binder_thread *thread;
3370 unsigned int size = _IOC_SIZE(cmd);
3371 void __user *ubuf = (void __user *)arg;
3372
78260ac6
TR
3373 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
3374 proc->pid, current->pid, cmd, arg);*/
355b0502 3375
975a1ac9
AH
3376 trace_binder_ioctl(cmd, arg);
3377
355b0502
GKH
3378 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
3379 if (ret)
975a1ac9 3380 goto err_unlocked;
355b0502 3381
975a1ac9 3382 binder_lock(__func__);
355b0502
GKH
3383 thread = binder_get_thread(proc);
3384 if (thread == NULL) {
3385 ret = -ENOMEM;
3386 goto err;
3387 }
3388
3389 switch (cmd) {
78260ac6
TR
3390 case BINDER_WRITE_READ:
3391 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
3392 if (ret)
355b0502 3393 goto err;
355b0502 3394 break;
355b0502
GKH
3395 case BINDER_SET_MAX_THREADS:
3396 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
3397 ret = -EINVAL;
3398 goto err;
3399 }
3400 break;
3401 case BINDER_SET_CONTEXT_MGR:
78260ac6
TR
3402 ret = binder_ioctl_set_ctx_mgr(filp);
3403 if (ret)
355b0502 3404 goto err;
355b0502
GKH
3405 break;
3406 case BINDER_THREAD_EXIT:
56b468fc 3407 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
355b0502 3408 proc->pid, thread->pid);
7a4408c6 3409 binder_thread_release(proc, thread);
355b0502
GKH
3410 thread = NULL;
3411 break;
36c89c0a
MM
3412 case BINDER_VERSION: {
3413 struct binder_version __user *ver = ubuf;
3414
355b0502
GKH
3415 if (size != sizeof(struct binder_version)) {
3416 ret = -EINVAL;
3417 goto err;
3418 }
36c89c0a
MM
3419 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
3420 &ver->protocol_version)) {
355b0502
GKH
3421 ret = -EINVAL;
3422 goto err;
3423 }
3424 break;
36c89c0a 3425 }
355b0502
GKH
3426 default:
3427 ret = -EINVAL;
3428 goto err;
3429 }
3430 ret = 0;
3431err:
3432 if (thread)
08dabcee 3433 thread->looper_need_return = false;
975a1ac9 3434 binder_unlock(__func__);
355b0502
GKH
3435 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
3436 if (ret && ret != -ERESTARTSYS)
56b468fc 3437 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
975a1ac9
AH
3438err_unlocked:
3439 trace_binder_ioctl_done(ret);
355b0502
GKH
3440 return ret;
3441}
3442
3443static void binder_vma_open(struct vm_area_struct *vma)
3444{
3445 struct binder_proc *proc = vma->vm_private_data;
10f62861 3446
355b0502 3447 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
56b468fc 3448 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
355b0502
GKH
3449 proc->pid, vma->vm_start, vma->vm_end,
3450 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
3451 (unsigned long)pgprot_val(vma->vm_page_prot));
355b0502
GKH
3452}
3453
3454static void binder_vma_close(struct vm_area_struct *vma)
3455{
3456 struct binder_proc *proc = vma->vm_private_data;
10f62861 3457
355b0502 3458 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
56b468fc 3459 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
355b0502
GKH
3460 proc->pid, vma->vm_start, vma->vm_end,
3461 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
3462 (unsigned long)pgprot_val(vma->vm_page_prot));
19c98724 3463 binder_alloc_vma_close(&proc->alloc);
355b0502
GKH
3464 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
3465}
3466
11bac800 3467static int binder_vm_fault(struct vm_fault *vmf)
ddac7d5f
VM
3468{
3469 return VM_FAULT_SIGBUS;
3470}
3471
7cbea8dc 3472static const struct vm_operations_struct binder_vm_ops = {
355b0502
GKH
3473 .open = binder_vma_open,
3474 .close = binder_vma_close,
ddac7d5f 3475 .fault = binder_vm_fault,
355b0502
GKH
3476};
3477
19c98724
TK
3478static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
3479{
3480 int ret;
3481 struct binder_proc *proc = filp->private_data;
3482 const char *failure_string;
3483
3484 if (proc->tsk != current->group_leader)
3485 return -EINVAL;
3486
3487 if ((vma->vm_end - vma->vm_start) > SZ_4M)
3488 vma->vm_end = vma->vm_start + SZ_4M;
3489
3490 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3491 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
3492 __func__, proc->pid, vma->vm_start, vma->vm_end,
3493 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
3494 (unsigned long)pgprot_val(vma->vm_page_prot));
3495
3496 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
3497 ret = -EPERM;
3498 failure_string = "bad vm_flags";
3499 goto err_bad_arg;
3500 }
3501 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
3502 vma->vm_ops = &binder_vm_ops;
3503 vma->vm_private_data = proc;
3504
3505 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
3506 if (ret)
3507 return ret;
3508 proc->files = get_files_struct(current);
3509 return 0;
3510
355b0502 3511err_bad_arg:
258767fe 3512 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
355b0502
GKH
3513 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
3514 return ret;
3515}
3516
3517static int binder_open(struct inode *nodp, struct file *filp)
3518{
3519 struct binder_proc *proc;
ac4812c5 3520 struct binder_device *binder_dev;
355b0502
GKH
3521
3522 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
3523 current->group_leader->pid, current->pid);
3524
3525 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
3526 if (proc == NULL)
3527 return -ENOMEM;
c4ea41ba
TK
3528 get_task_struct(current->group_leader);
3529 proc->tsk = current->group_leader;
355b0502
GKH
3530 INIT_LIST_HEAD(&proc->todo);
3531 init_waitqueue_head(&proc->wait);
3532 proc->default_priority = task_nice(current);
ac4812c5
MC
3533 binder_dev = container_of(filp->private_data, struct binder_device,
3534 miscdev);
3535 proc->context = &binder_dev->context;
19c98724 3536 binder_alloc_init(&proc->alloc);
975a1ac9
AH
3537
3538 binder_lock(__func__);
3539
355b0502 3540 binder_stats_created(BINDER_STAT_PROC);
355b0502
GKH
3541 proc->pid = current->group_leader->pid;
3542 INIT_LIST_HEAD(&proc->delivered_death);
3543 filp->private_data = proc;
975a1ac9
AH
3544
3545 binder_unlock(__func__);
355b0502 3546
c44b1231
TK
3547 mutex_lock(&binder_procs_lock);
3548 hlist_add_head(&proc->proc_node, &binder_procs);
3549 mutex_unlock(&binder_procs_lock);
3550
16b66554 3551 if (binder_debugfs_dir_entry_proc) {
355b0502 3552 char strbuf[11];
10f62861 3553
355b0502 3554 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
14db3181
MC
3555 /*
3556 * proc debug entries are shared between contexts, so
3557 * this will fail if the process tries to open the driver
3558 * again with a different context. The priting code will
3559 * anyway print all contexts that a given PID has, so this
3560 * is not a problem.
3561 */
16b66554 3562 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
14db3181
MC
3563 binder_debugfs_dir_entry_proc,
3564 (void *)(unsigned long)proc->pid,
3565 &binder_proc_fops);
355b0502
GKH
3566 }
3567
3568 return 0;
3569}
3570
3571static int binder_flush(struct file *filp, fl_owner_t id)
3572{
3573 struct binder_proc *proc = filp->private_data;
3574
3575 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
3576
3577 return 0;
3578}
3579
3580static void binder_deferred_flush(struct binder_proc *proc)
3581{
3582 struct rb_node *n;
3583 int wake_count = 0;
10f62861 3584
355b0502
GKH
3585 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
3586 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
10f62861 3587
08dabcee 3588 thread->looper_need_return = true;
355b0502
GKH
3589 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
3590 wake_up_interruptible(&thread->wait);
3591 wake_count++;
3592 }
3593 }
3594 wake_up_interruptible_all(&proc->wait);
3595
3596 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3597 "binder_flush: %d woke %d threads\n", proc->pid,
3598 wake_count);
3599}
3600
3601static int binder_release(struct inode *nodp, struct file *filp)
3602{
3603 struct binder_proc *proc = filp->private_data;
10f62861 3604
16b66554 3605 debugfs_remove(proc->debugfs_entry);
355b0502
GKH
3606 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
3607
3608 return 0;
3609}
3610
008fa749
ME
3611static int binder_node_release(struct binder_node *node, int refs)
3612{
3613 struct binder_ref *ref;
3614 int death = 0;
3615
3616 list_del_init(&node->work.entry);
3617 binder_release_work(&node->async_todo);
3618
3619 if (hlist_empty(&node->refs)) {
3620 kfree(node);
3621 binder_stats_deleted(BINDER_STAT_NODE);
3622
3623 return refs;
3624 }
3625
3626 node->proc = NULL;
3627 node->local_strong_refs = 0;
3628 node->local_weak_refs = 0;
c44b1231
TK
3629
3630 spin_lock(&binder_dead_nodes_lock);
008fa749 3631 hlist_add_head(&node->dead_node, &binder_dead_nodes);
c44b1231 3632 spin_unlock(&binder_dead_nodes_lock);
008fa749
ME
3633
3634 hlist_for_each_entry(ref, &node->refs, node_entry) {
3635 refs++;
3636
3637 if (!ref->death)
e194fd8a 3638 continue;
008fa749
ME
3639
3640 death++;
3641
3642 if (list_empty(&ref->death->work.entry)) {
3643 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3644 list_add_tail(&ref->death->work.entry,
3645 &ref->proc->todo);
3646 wake_up_interruptible(&ref->proc->wait);
3647 } else
3648 BUG();
3649 }
3650
008fa749
ME
3651 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3652 "node %d now dead, refs %d, death %d\n",
3653 node->debug_id, refs, death);
3654
3655 return refs;
3656}
3657
355b0502
GKH
3658static void binder_deferred_release(struct binder_proc *proc)
3659{
342e5c90 3660 struct binder_context *context = proc->context;
355b0502 3661 struct rb_node *n;
19c98724 3662 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
355b0502 3663
355b0502
GKH
3664 BUG_ON(proc->files);
3665
c44b1231 3666 mutex_lock(&binder_procs_lock);
355b0502 3667 hlist_del(&proc->proc_node);
c44b1231 3668 mutex_unlock(&binder_procs_lock);
53413e7d 3669
c44b1231 3670 mutex_lock(&context->context_mgr_node_lock);
342e5c90
MC
3671 if (context->binder_context_mgr_node &&
3672 context->binder_context_mgr_node->proc == proc) {
355b0502 3673 binder_debug(BINDER_DEBUG_DEAD_BINDER,
c07c933f
ME
3674 "%s: %d context_mgr_node gone\n",
3675 __func__, proc->pid);
342e5c90 3676 context->binder_context_mgr_node = NULL;
355b0502 3677 }
c44b1231 3678 mutex_unlock(&context->context_mgr_node_lock);
7a4408c6
TK
3679 /*
3680 * Make sure proc stays alive after we
3681 * remove all the threads
3682 */
3683 proc->tmp_ref++;
355b0502 3684
7a4408c6 3685 proc->is_dead = true;
355b0502
GKH
3686 threads = 0;
3687 active_transactions = 0;
3688 while ((n = rb_first(&proc->threads))) {
53413e7d
ME
3689 struct binder_thread *thread;
3690
3691 thread = rb_entry(n, struct binder_thread, rb_node);
355b0502 3692 threads++;
7a4408c6 3693 active_transactions += binder_thread_release(proc, thread);
355b0502 3694 }
53413e7d 3695
355b0502
GKH
3696 nodes = 0;
3697 incoming_refs = 0;
3698 while ((n = rb_first(&proc->nodes))) {
53413e7d 3699 struct binder_node *node;
355b0502 3700
53413e7d 3701 node = rb_entry(n, struct binder_node, rb_node);
355b0502
GKH
3702 nodes++;
3703 rb_erase(&node->rb_node, &proc->nodes);
008fa749 3704 incoming_refs = binder_node_release(node, incoming_refs);
355b0502 3705 }
53413e7d 3706
355b0502
GKH
3707 outgoing_refs = 0;
3708 while ((n = rb_first(&proc->refs_by_desc))) {
53413e7d
ME
3709 struct binder_ref *ref;
3710
3711 ref = rb_entry(n, struct binder_ref, rb_node_desc);
355b0502 3712 outgoing_refs++;
372e3147
TK
3713 binder_cleanup_ref(ref);
3714 binder_free_ref(ref);
355b0502 3715 }
53413e7d 3716
355b0502 3717 binder_release_work(&proc->todo);
675d66b0 3718 binder_release_work(&proc->delivered_death);
355b0502 3719
355b0502 3720 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
19c98724 3721 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
c07c933f 3722 __func__, proc->pid, threads, nodes, incoming_refs,
19c98724 3723 outgoing_refs, active_transactions);
355b0502 3724
7a4408c6 3725 binder_proc_dec_tmpref(proc);
355b0502
GKH
3726}
3727
3728static void binder_deferred_func(struct work_struct *work)
3729{
3730 struct binder_proc *proc;
3731 struct files_struct *files;
3732
3733 int defer;
10f62861 3734
355b0502 3735 do {
975a1ac9 3736 binder_lock(__func__);
355b0502
GKH
3737 mutex_lock(&binder_deferred_lock);
3738 if (!hlist_empty(&binder_deferred_list)) {
3739 proc = hlist_entry(binder_deferred_list.first,
3740 struct binder_proc, deferred_work_node);
3741 hlist_del_init(&proc->deferred_work_node);
3742 defer = proc->deferred_work;
3743 proc->deferred_work = 0;
3744 } else {
3745 proc = NULL;
3746 defer = 0;
3747 }
3748 mutex_unlock(&binder_deferred_lock);
3749
3750 files = NULL;
3751 if (defer & BINDER_DEFERRED_PUT_FILES) {
3752 files = proc->files;
3753 if (files)
3754 proc->files = NULL;
3755 }
3756
3757 if (defer & BINDER_DEFERRED_FLUSH)
3758 binder_deferred_flush(proc);
3759
3760 if (defer & BINDER_DEFERRED_RELEASE)
3761 binder_deferred_release(proc); /* frees proc */
3762
975a1ac9 3763 binder_unlock(__func__);
355b0502
GKH
3764 if (files)
3765 put_files_struct(files);
3766 } while (proc);
3767}
3768static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
3769
3770static void
3771binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
3772{
3773 mutex_lock(&binder_deferred_lock);
3774 proc->deferred_work |= defer;
3775 if (hlist_unhashed(&proc->deferred_work_node)) {
3776 hlist_add_head(&proc->deferred_work_node,
3777 &binder_deferred_list);
1beba52d 3778 schedule_work(&binder_deferred_work);
355b0502
GKH
3779 }
3780 mutex_unlock(&binder_deferred_lock);
3781}
3782
5249f488
AH
3783static void print_binder_transaction(struct seq_file *m, const char *prefix,
3784 struct binder_transaction *t)
3785{
7a4408c6 3786 spin_lock(&t->lock);
5249f488
AH
3787 seq_printf(m,
3788 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
3789 prefix, t->debug_id, t,
3790 t->from ? t->from->proc->pid : 0,
3791 t->from ? t->from->pid : 0,
3792 t->to_proc ? t->to_proc->pid : 0,
3793 t->to_thread ? t->to_thread->pid : 0,
3794 t->code, t->flags, t->priority, t->need_reply);
7a4408c6
TK
3795 spin_unlock(&t->lock);
3796
355b0502 3797 if (t->buffer == NULL) {
5249f488
AH
3798 seq_puts(m, " buffer free\n");
3799 return;
355b0502 3800 }
5249f488
AH
3801 if (t->buffer->target_node)
3802 seq_printf(m, " node %d",
3803 t->buffer->target_node->debug_id);
3804 seq_printf(m, " size %zd:%zd data %p\n",
3805 t->buffer->data_size, t->buffer->offsets_size,
3806 t->buffer->data);
355b0502
GKH
3807}
3808
5249f488
AH
/*
 * print_binder_work() - dump one binder_work item to a debugfs seq_file.
 * @m:                  output seq_file.
 * @prefix:             indentation for non-transaction lines.
 * @transaction_prefix: indentation/label used for transaction work items.
 * @w:                  work item; its type selects the container it lives in.
 *
 * Dispatches on w->type and uses container_of() to recover the enclosing
 * transaction/error/node as appropriate.
 */
static void print_binder_work(struct seq_file *m, const char *prefix,
			      const char *transaction_prefix,
			      struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction(m, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		/* u/c are the userspace ptr/cookie; printed as fixed-width hex. */
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}
3851
5249f488
AH
/*
 * print_binder_thread() - dump one thread's state, transaction stack and todo
 * list to a debugfs seq_file.
 * @m:            output seq_file.
 * @thread:       thread to print.
 * @print_always: if 0, suppress the whole entry when nothing followed the
 *                header (the seq_file write position is rewound, see below).
 */
static void print_binder_thread(struct seq_file *m,
				struct binder_thread *thread,
				int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
		   thread->pid, thread->looper,
		   thread->looper_need_return,
		   atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	/*
	 * Walk the transaction stack from this thread's perspective:
	 * follow from_parent for our outgoing entries and to_parent for
	 * incoming ones; any other linkage is inconsistent ("bad") and
	 * terminates the walk.
	 */
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction(m,
						 "    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction(m,
						 "    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction(m, "    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work(m, "    ", "    pending transaction", w);
	}
	/* Nothing printed after the header: rewind to drop the entry. */
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}
3887
5249f488 3888static void print_binder_node(struct seq_file *m, struct binder_node *node)
355b0502
GKH
3889{
3890 struct binder_ref *ref;
355b0502
GKH
3891 struct binder_work *w;
3892 int count;
3893
3894 count = 0;
b67bfe0d 3895 hlist_for_each_entry(ref, &node->refs, node_entry)
355b0502
GKH
3896 count++;
3897
da49889d
AH
3898 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
3899 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5249f488
AH
3900 node->has_strong_ref, node->has_weak_ref,
3901 node->local_strong_refs, node->local_weak_refs,
3902 node->internal_strong_refs, count);
355b0502 3903 if (count) {
5249f488 3904 seq_puts(m, " proc");
b67bfe0d 3905 hlist_for_each_entry(ref, &node->refs, node_entry)
5249f488 3906 seq_printf(m, " %d", ref->proc->pid);
355b0502 3907 }
5249f488
AH
3908 seq_puts(m, "\n");
3909 list_for_each_entry(w, &node->async_todo, entry)
3910 print_binder_work(m, " ",
3911 " pending async transaction", w);
355b0502
GKH
3912}
3913
5249f488 3914static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
355b0502 3915{
372e3147
TK
3916 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
3917 ref->data.debug_id, ref->data.desc,
3918 ref->node->proc ? "" : "dead ",
3919 ref->node->debug_id, ref->data.strong,
3920 ref->data.weak, ref->death);
355b0502
GKH
3921}
3922
5249f488
AH
/*
 * print_binder_proc() - dump one process's threads, nodes, refs, buffers and
 * pending work to a debugfs seq_file.
 * @m:         output seq_file.
 * @proc:      process to print.
 * @print_all: nonzero prints every node and all refs; zero prints only nodes
 *             with async transactions and suppresses an empty entry by
 *             rewinding the seq_file write position (same trick as
 *             print_binder_thread()).
 */
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (print_all || node->has_async_transaction)
			print_binder_node(m, node);
	}
	if (print_all) {
		/* Refs are walked in descriptor order (refs_by_desc tree). */
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref(m, rb_entry(n, struct binder_ref,
						     rb_node_desc));
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work(m, "  ", "  pending transaction", w);
	/* Only note the presence of delivered death work, not each entry. */
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}
3961
167bccbd 3962static const char * const binder_return_strings[] = {
355b0502
GKH
3963 "BR_ERROR",
3964 "BR_OK",
3965 "BR_TRANSACTION",
3966 "BR_REPLY",
3967 "BR_ACQUIRE_RESULT",
3968 "BR_DEAD_REPLY",
3969 "BR_TRANSACTION_COMPLETE",
3970 "BR_INCREFS",
3971 "BR_ACQUIRE",
3972 "BR_RELEASE",
3973 "BR_DECREFS",
3974 "BR_ATTEMPT_ACQUIRE",
3975 "BR_NOOP",
3976 "BR_SPAWN_LOOPER",
3977 "BR_FINISHED",
3978 "BR_DEAD_BINDER",
3979 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
3980 "BR_FAILED_REPLY"
3981};
3982
167bccbd 3983static const char * const binder_command_strings[] = {
355b0502
GKH
3984 "BC_TRANSACTION",
3985 "BC_REPLY",
3986 "BC_ACQUIRE_RESULT",
3987 "BC_FREE_BUFFER",
3988 "BC_INCREFS",
3989 "BC_ACQUIRE",
3990 "BC_RELEASE",
3991 "BC_DECREFS",
3992 "BC_INCREFS_DONE",
3993 "BC_ACQUIRE_DONE",
3994 "BC_ATTEMPT_ACQUIRE",
3995 "BC_REGISTER_LOOPER",
3996 "BC_ENTER_LOOPER",
3997 "BC_EXIT_LOOPER",
3998 "BC_REQUEST_DEATH_NOTIFICATION",
3999 "BC_CLEAR_DEATH_NOTIFICATION",
7980240b
MC
4000 "BC_DEAD_BINDER_DONE",
4001 "BC_TRANSACTION_SG",
4002 "BC_REPLY_SG",
355b0502
GKH
4003};
4004
167bccbd 4005static const char * const binder_objstat_strings[] = {
355b0502
GKH
4006 "proc",
4007 "thread",
4008 "node",
4009 "ref",
4010 "death",
4011 "transaction",
4012 "transaction_complete"
4013};
4014
5249f488
AH
/*
 * print_binder_stats() - dump command/return/object counters to a seq_file.
 * @m:      output seq_file.
 * @prefix: indentation prepended to every line (global vs per-proc use).
 * @stats:  counters to print; zero counters are skipped.
 *
 * BUILD_BUG_ONs keep the name tables above in sync with the counter arrays
 * at compile time.
 */
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		/* active = still-live objects; total = ever created. */
		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}
4056
5249f488
AH
/*
 * print_binder_proc_stats() - dump per-process summary statistics
 * (thread/node/ref/buffer counts and the proc's counter block) to a
 * debugfs seq_file.
 * @m:    output seq_file.
 * @proc: process whose trees and lists are counted.
 */
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct rb_node *n;
	int count, strong, weak;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			proc->ready_threads,
			binder_alloc_get_free_async_space(&proc->alloc));
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	seq_printf(m, "  nodes: %d\n", count);
	/* Sum strong/weak counts over all refs in one walk of the desc tree. */
	count = 0;
	strong = 0;
	weak = 0;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	/* Only BINDER_WORK_TRANSACTION entries count as pending here. */
	count = 0;
	list_for_each_entry(w, &proc->todo, entry) {
		switch (w->type) {
		case BINDER_WORK_TRANSACTION:
			count++;
			break;
		default:
			break;
		}
	}
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}
4109
4110
5249f488 4111static int binder_state_show(struct seq_file *m, void *unused)
355b0502
GKH
4112{
4113 struct binder_proc *proc;
355b0502 4114 struct binder_node *node;
355b0502 4115
1cf29cf4 4116 binder_lock(__func__);
355b0502 4117
5249f488 4118 seq_puts(m, "binder state:\n");
355b0502 4119
c44b1231 4120 spin_lock(&binder_dead_nodes_lock);
355b0502 4121 if (!hlist_empty(&binder_dead_nodes))
5249f488 4122 seq_puts(m, "dead nodes:\n");
b67bfe0d 4123 hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
5249f488 4124 print_binder_node(m, node);
c44b1231 4125 spin_unlock(&binder_dead_nodes_lock);
355b0502 4126
c44b1231 4127 mutex_lock(&binder_procs_lock);
b67bfe0d 4128 hlist_for_each_entry(proc, &binder_procs, proc_node)
5249f488 4129 print_binder_proc(m, proc, 1);
c44b1231 4130 mutex_unlock(&binder_procs_lock);
1cf29cf4 4131 binder_unlock(__func__);
5249f488 4132 return 0;
355b0502
GKH
4133}
4134
5249f488 4135static int binder_stats_show(struct seq_file *m, void *unused)
355b0502
GKH
4136{
4137 struct binder_proc *proc;
355b0502 4138
1cf29cf4 4139 binder_lock(__func__);
355b0502 4140
5249f488 4141 seq_puts(m, "binder stats:\n");
355b0502 4142
5249f488 4143 print_binder_stats(m, "", &binder_stats);
355b0502 4144
c44b1231 4145 mutex_lock(&binder_procs_lock);
b67bfe0d 4146 hlist_for_each_entry(proc, &binder_procs, proc_node)
5249f488 4147 print_binder_proc_stats(m, proc);
c44b1231 4148 mutex_unlock(&binder_procs_lock);
1cf29cf4 4149 binder_unlock(__func__);
5249f488 4150 return 0;
355b0502
GKH
4151}
4152
5249f488 4153static int binder_transactions_show(struct seq_file *m, void *unused)
355b0502
GKH
4154{
4155 struct binder_proc *proc;
355b0502 4156
1cf29cf4 4157 binder_lock(__func__);
355b0502 4158
5249f488 4159 seq_puts(m, "binder transactions:\n");
c44b1231 4160 mutex_lock(&binder_procs_lock);
b67bfe0d 4161 hlist_for_each_entry(proc, &binder_procs, proc_node)
5249f488 4162 print_binder_proc(m, proc, 0);
c44b1231 4163 mutex_unlock(&binder_procs_lock);
1cf29cf4 4164 binder_unlock(__func__);
5249f488 4165 return 0;
355b0502
GKH
4166}
4167
5249f488 4168static int binder_proc_show(struct seq_file *m, void *unused)
355b0502 4169{
83050a4e 4170 struct binder_proc *itr;
14db3181 4171 int pid = (unsigned long)m->private;
355b0502 4172
1cf29cf4 4173 binder_lock(__func__);
83050a4e 4174
c44b1231 4175 mutex_lock(&binder_procs_lock);
83050a4e 4176 hlist_for_each_entry(itr, &binder_procs, proc_node) {
14db3181
MC
4177 if (itr->pid == pid) {
4178 seq_puts(m, "binder proc state:\n");
4179 print_binder_proc(m, itr, 1);
83050a4e
RA
4180 }
4181 }
c44b1231
TK
4182 mutex_unlock(&binder_procs_lock);
4183
1cf29cf4 4184 binder_unlock(__func__);
5249f488 4185 return 0;
355b0502
GKH
4186}
4187
5249f488 4188static void print_binder_transaction_log_entry(struct seq_file *m,
355b0502
GKH
4189 struct binder_transaction_log_entry *e)
4190{
d99c7333
TK
4191 int debug_id = READ_ONCE(e->debug_id_done);
4192 /*
4193 * read barrier to guarantee debug_id_done read before
4194 * we print the log values
4195 */
4196 smp_rmb();
5249f488 4197 seq_printf(m,
d99c7333 4198 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5249f488
AH
4199 e->debug_id, (e->call_type == 2) ? "reply" :
4200 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
14db3181 4201 e->from_thread, e->to_proc, e->to_thread, e->context_name,
57ada2fb
TK
4202 e->to_node, e->target_handle, e->data_size, e->offsets_size,
4203 e->return_error, e->return_error_param,
4204 e->return_error_line);
d99c7333
TK
4205 /*
4206 * read-barrier to guarantee read of debug_id_done after
4207 * done printing the fields of the entry
4208 */
4209 smp_rmb();
4210 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
4211 "\n" : " (incomplete)\n");
355b0502
GKH
4212}
4213
5249f488 4214static int binder_transaction_log_show(struct seq_file *m, void *unused)
355b0502 4215{
5249f488 4216 struct binder_transaction_log *log = m->private;
d99c7333
TK
4217 unsigned int log_cur = atomic_read(&log->cur);
4218 unsigned int count;
4219 unsigned int cur;
355b0502 4220 int i;
355b0502 4221
d99c7333
TK
4222 count = log_cur + 1;
4223 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
4224 0 : count % ARRAY_SIZE(log->entry);
4225 if (count > ARRAY_SIZE(log->entry) || log->full)
4226 count = ARRAY_SIZE(log->entry);
4227 for (i = 0; i < count; i++) {
4228 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
4229
4230 print_binder_transaction_log_entry(m, &log->entry[index]);
355b0502 4231 }
5249f488 4232 return 0;
355b0502
GKH
4233}
4234
/*
 * File operations for the /dev/binder* misc devices registered in
 * init_binder_device().  The native ioctl handler is 32/64-bit clean,
 * so compat_ioctl reuses it directly.
 */
static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};
4245
5249f488
AH
/*
 * Instantiate the seq_file open/read boilerplate (binder_*_fops) for each
 * *_show() function above; these fops are handed to debugfs_create_file()
 * in binder_init().
 */
BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);
4250
ac4812c5
MC
/*
 * init_binder_device() - allocate and register one binder misc device.
 * @name: device node name; stored by reference in the miscdevice, so it
 *        must outlive the device (it points into the device_names buffer
 *        built in binder_init()).
 *
 * On success the new device is linked onto the global binder_devices list.
 * Return: 0 on success, -ENOMEM on allocation failure, or the
 * misc_register() error code (the device struct is freed in that case).
 */
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	/* No context manager yet; one is claimed later via ioctl. */
	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}
4278
355b0502
GKH
4279static int __init binder_init(void)
4280{
4281 int ret;
ac4812c5
MC
4282 char *device_name, *device_names;
4283 struct binder_device *device;
4284 struct hlist_node *tmp;
355b0502 4285
d99c7333
TK
4286 atomic_set(&binder_transaction_log.cur, ~0U);
4287 atomic_set(&binder_transaction_log_failed.cur, ~0U);
4288
16b66554
AH
4289 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
4290 if (binder_debugfs_dir_entry_root)
4291 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
4292 binder_debugfs_dir_entry_root);
ac4812c5 4293
16b66554
AH
4294 if (binder_debugfs_dir_entry_root) {
4295 debugfs_create_file("state",
4296 S_IRUGO,
4297 binder_debugfs_dir_entry_root,
4298 NULL,
4299 &binder_state_fops);
4300 debugfs_create_file("stats",
4301 S_IRUGO,
4302 binder_debugfs_dir_entry_root,
4303 NULL,
4304 &binder_stats_fops);
4305 debugfs_create_file("transactions",
4306 S_IRUGO,
4307 binder_debugfs_dir_entry_root,
4308 NULL,
4309 &binder_transactions_fops);
4310 debugfs_create_file("transaction_log",
4311 S_IRUGO,
4312 binder_debugfs_dir_entry_root,
4313 &binder_transaction_log,
4314 &binder_transaction_log_fops);
4315 debugfs_create_file("failed_transaction_log",
4316 S_IRUGO,
4317 binder_debugfs_dir_entry_root,
4318 &binder_transaction_log_failed,
4319 &binder_transaction_log_fops);
355b0502 4320 }
ac4812c5
MC
4321
4322 /*
4323 * Copy the module_parameter string, because we don't want to
4324 * tokenize it in-place.
4325 */
4326 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
4327 if (!device_names) {
4328 ret = -ENOMEM;
4329 goto err_alloc_device_names_failed;
4330 }
4331 strcpy(device_names, binder_devices_param);
4332
4333 while ((device_name = strsep(&device_names, ","))) {
4334 ret = init_binder_device(device_name);
4335 if (ret)
4336 goto err_init_binder_device_failed;
4337 }
4338
4339 return ret;
4340
4341err_init_binder_device_failed:
4342 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
4343 misc_deregister(&device->miscdev);
4344 hlist_del(&device->hlist);
4345 kfree(device);
4346 }
4347err_alloc_device_names_failed:
4348 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
4349
355b0502
GKH
4350 return ret;
4351}
4352
/* Built-in only: initialized at device_initcall time, never unloaded. */
device_initcall(binder_init);

/* Define (not merely declare) the binder tracepoints exactly once. */
#define CREATE_TRACE_POINTS
#include "binder_trace.h"

MODULE_LICENSE("GPL v2");