FROMLIST: binder: introduce locking helper functions
drivers/android/binder.c

/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel.
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->nodes) and all todo lists associated
 *    with the binder_proc (proc->todo, thread->todo,
 *    proc->delivered_death and node->async_todo).
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel.
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock to be held on entry indicate which
 * lock in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */

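/*
 * Illustrative sketch (editor's addition, not part of the driver): a
 * hypothetical function that needed all three locks would take them in
 * the 1) outer, 2) node, 3) inner order above, using the helpers
 * defined later in this file:
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_node_lock(node);		// 2) node->lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *	// ... manipulate proc->todo / node->async_todo ...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */
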
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static DEFINE_MUTEX(binder_main_lock);

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;
static struct workqueue_struct *binder_deferred_workqueue;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
	BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};
struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = 1;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}

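/*
 * Illustrative sketch (editor's addition): a reader of this log pairs
 * with the smp_wmb() above by sampling e->debug_id_done first, issuing
 * a read barrier, copying the entry, and then re-checking debug_id_done
 * to detect a concurrent overwrite, e.g.:
 *
 *	unsigned int start = READ_ONCE(e->debug_id_done);
 *	smp_rmb();
 *	copy = *e;
 *	smp_rmb();
 *	if (copy.debug_id && start == READ_ONCE(e->debug_id_done))
 *		// copy is a consistent snapshot
 */
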
struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;

	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};

struct binder_work {
	struct list_head entry;
	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id: unique ID for debugging
 *            (invariant after initialized)
 * @lock: lock for node fields
 * @work: worklist element for node work
 * @rb_node: element for proc->nodes tree
 * @dead_node: element for binder_dead_nodes list
 *             (protected by binder_dead_nodes_lock)
 * @proc: binder_proc that owns this node
 *        (invariant after initialized)
 * @refs: list of references on this node
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 * @local_weak_refs: weak user refs from local process
 * @local_strong_refs: strong user refs from local process
 * @tmp_refs: temporary kernel refs
 * @ptr: userspace pointer for node
 *       (invariant, no lock needed)
 * @cookie: userspace cookie for node
 *          (invariant, no lock needed)
 * @has_strong_ref: userspace notified of strong ref
 * @pending_strong_ref: userspace has acked notification of strong ref
 * @has_weak_ref: userspace notified of weak ref
 * @pending_weak_ref: userspace has acked notification of weak ref
 * @has_async_transaction: async transaction to node in progress
 * @accept_fds: file descriptor operations supported for node
 *              (invariant after initialized)
 * @min_priority: minimum scheduling priority
 *                (invariant after initialized)
 * @async_todo: list of async work items
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	unsigned has_strong_ref:1;
	unsigned pending_strong_ref:1;
	unsigned has_weak_ref:1;
	unsigned pending_weak_ref:1;
	unsigned has_async_transaction:1;
	unsigned accept_fds:1;
	unsigned min_priority:8;
	struct list_head async_todo;
};

struct binder_ref_death {
	struct binder_work work;
	binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id: unique ID for the ref
 * @desc: unique userspace handle for ref
 * @strong: strong ref count (debugging only if not locked)
 * @weak: weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data: binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry: list entry for node->refs list in target node
 * @proc: binder_proc containing ref
 * @node: binder_node of target node. When cleaning up a
 *        ref for deletion in binder_cleanup_ref, a non-NULL
 *        @node indicates the node must be freed
 * @death: pointer to death notification (ref_death) if requested
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES = 0x01,
	BINDER_DEFERRED_FLUSH = 0x02,
	BINDER_DEFERRED_RELEASE = 0x04,
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node: element for binder_procs list
 * @threads: rbtree of binder_threads in this proc
 * @nodes: rbtree of binder nodes associated with
 *         this proc ordered by node->ptr
 * @refs_by_desc: rbtree of refs ordered by ref->desc
 * @refs_by_node: rbtree of refs ordered by ref->node
 * @pid: PID of group_leader of process
 *       (invariant after initialized)
 * @tsk: task_struct for group_leader of process
 *       (invariant after initialized)
 * @files: files_struct for process
 *         (invariant after initialized)
 * @deferred_work_node: element for binder_deferred_list
 *                      (protected by binder_deferred_lock)
 * @deferred_work: bitmap of deferred work to perform
 *                 (protected by binder_deferred_lock)
 * @is_dead: process is dead and awaiting free
 *           when outstanding transactions are cleaned up
 * @todo: list of work for this process
 * @wait: wait queue head to wait for proc work
 *        (invariant after initialized)
 * @stats: per-process binder statistics
 *         (atomics, no lock needed)
 * @delivered_death: list of delivered death notifications
 * @max_threads: cap on number of binder threads
 * @requested_threads: number of binder threads requested but not
 *                     yet started. In current implementation, can
 *                     only be 0 or 1.
 * @requested_threads_started: number of binder threads started
 * @ready_threads: number of threads waiting for proc work
 * @tmp_ref: temporary reference to indicate proc is in use
 * @default_priority: default scheduler priority
 *                    (invariant after initialized)
 * @debugfs_entry: debugfs node
 * @alloc: binder allocator bookkeeping
 * @context: binder_context for this proc
 *           (invariant after initialized)
 * @inner_lock: can nest under outer_lock and/or node lock
 * @outer_lock: no nesting under inner or node lock
 *              Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED = 0x02,
	BINDER_LOOPER_STATE_EXITED = 0x04,
	BINDER_LOOPER_STATE_INVALID = 0x08,
	BINDER_LOOPER_STATE_WAITING = 0x10,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc: binder process for this thread
 *        (invariant after initialization)
 * @rb_node: element for proc->threads rbtree
 * @pid: PID for this thread
 *       (invariant after initialization)
 * @looper: bitmap of looping state
 *          (only accessed by this thread)
 * @looper_need_return: looping thread needs to exit driver
 *                      (no lock needed)
 * @transaction_stack: stack of in-progress transactions for this thread
 * @todo: list of work to do for this thread
 * @return_error: transaction errors reported by this thread
 *                (only accessed by this thread)
 * @reply_error: transaction errors reported by target thread
 * @wait: wait queue for thread work
 * @stats: per-thread statistics
 *         (atomics, no lock needed)
 * @tmp_ref: temporary reference to indicate thread is in use
 *           (atomic since @proc->inner_lock cannot
 *           always be acquired)
 * @is_dead: thread is dead and awaiting free
 *           when outstanding transactions are cleaned up
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */ /* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	/**
	 * @lock: protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node: struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref(struct binder_node *node);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}

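/*
 * Illustrative sketch (editor's addition): the three helpers above are
 * used together when transferring a file descriptor into a target
 * process, with task_close_fd() serving as the error/cleanup path:
 *
 *	int fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
 *	if (fd >= 0)
 *		task_fd_install(target_proc, fd, file);
 *	...
 *	task_close_fd(target_proc, fd);	// on error or buffer release
 */
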
static inline void binder_lock(const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&binder_main_lock);
	trace_binder_locked(tag);
}

static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&binder_main_lock);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

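/*
 * Worked example (editor's addition, assuming the usual
 * rlimit_to_nice() mapping of MAX_NICE - rlim_cur + 1, i.e.
 * 20 - rlim_cur): a soft RLIMIT_NICE of 25 yields min_nice = -5,
 * while an unset limit of 0 yields min_nice = 20, which exceeds
 * MAX_NICE (19) and triggers the "RLIMIT_NICE not set" error above.
 */
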
static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   binder_uintptr_t ptr,
					   binder_uintptr_t cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);
	return node;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->
				      binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}

static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return 0;
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				spin_lock(&binder_dead_nodes_lock);
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		}
	}

	return 0;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node: node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node: node to dereference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	binder_dec_node(node, 0, 1);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

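/*
 * Illustrative sketch (editor's addition): the tmp_refs pattern keeps a
 * node alive while it is only referenced by a local variable:
 *
 *	node = binder_get_node(proc, ptr);	// takes a tmp ref
 *	if (node) {
 *		// ... use node ...
 *		binder_put_node(node);		// drops the tmp ref
 *	}
 */
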
static struct binder_ref *binder_get_ref(struct binder_proc *proc,
					 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node() - get the ref associated with given node
 * @proc: binder_proc that owns the ref
 * @node: binder_node of target
 * @new_ref: newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return: the ref for node. It is possible that another thread
 *         allocated/initialized the ref first in which case the
 *         returned ref would be different than the passed-in
 *         new_ref. new_ref must be kfree'd by the caller in
 *         this case.
 */
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node,
						  struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	return new_ref;
}

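/*
 * Worked example (editor's addition): the descriptor scan above assigns
 * the lowest unused desc. If the proc already holds refs with descs
 * {1, 2, 4}, the loop advances new_ref->data.desc to 2, then 3, then
 * breaks at 4, so the new ref gets desc 3. Desc 0 is reserved for refs
 * on the context manager node.
 */
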
static void binder_cleanup_ref(struct binder_ref *ref)
{
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	if (ref->data.strong)
		binder_dec_node(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	binder_dec_node(ref->node, 0, 1);

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		list_del(&ref->death->work.entry);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref() - increment the ref for given handle
 * @ref: ref to be incremented
 * @strong: if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref.
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref(struct binder_ref *ref, int strong,
			  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref() - dec the ref for given handle
 * @ref: ref to be decremented
 * @strong: if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * TODO: kfree is avoided here since an upcoming patch
 * will put this under a lock.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0) {
			int ret;

			ret = binder_dec_node(ref->node, strong, 1);
			if (ret)
				return false;
		}
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref(ref);
		/*
		 * TODO: we could kfree(ref) here, but an upcoming
		 * patch will call this with a lock held, so we
		 * return an indication that the ref should be
		 * freed.
		 */
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	ref = binder_get_ref(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;

	return node;

err_no_ref:
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref: ref to free
 *
 * Free the binder_ref and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @increment: true=inc reference, false=dec reference
 * @strong: true=strong reference, false=weak reference
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	ref = binder_get_ref(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref(ref, strong);

	if (rdata)
		*rdata = ref->data;

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @strong: true=strong reference, false=weak reference
 * @rdata: the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}


/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc: proc containing the ref
 * @node: target node
 * @strong: true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	ref = binder_get_ref_for_node(proc, node, NULL);
	if (!ref) {
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		ref = binder_get_ref_for_node(proc, node, new_ref);
	}
	ret = binder_inc_ref(ref, strong, target_list);
	*rdata = ref->data;
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction(struct binder_thread *target_thread,
				   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread: thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 *
	 * TODO: future patch adds locking to ensure that the
	 * check of tmp_ref and is_dead is done with a lock held
	 */
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_free_thread(thread);
		return;
	}
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc: proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_free_proc(proc);
		return;
	}
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t: binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

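/*
 * Illustrative sketch (editor's addition): binder_get_txn_from() and
 * binder_thread_dec_tmpref() bracket any use of t->from:
 *
 *	target_thread = binder_get_txn_from(t);	// tmp_ref++
 *	if (target_thread) {
 *		// ... use target_thread safely ...
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 *
 * binder_send_failed_reply() below follows exactly this pattern.
 */
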
static void binder_free_transaction(struct binder_transaction *t)
{
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				list_add_tail(
					&target_thread->reply_error.work.entry,
					&target_thread->todo);
				wake_up_interruptible(&target_thread->wait);
			} else {
				WARN(1, "Unexpected reply error: %u\n",
				     target_thread->reply_error.cmd);
			}
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}

/**
 * binder_validate_object() - checks for a valid metadata object in a buffer.
 * @buffer: binder_buffer that we're parsing.
 * @offset: offset in the buffer at which to validate an object.
 *
 * Return: If there's a valid metadata object at @offset in @buffer, the
 *         size of that object. Otherwise, it returns zero.
 */
static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
{
	/* Check if we can read a header first */
	struct binder_object_header *hdr;
	size_t object_size = 0;

	if (offset > buffer->data_size - sizeof(*hdr) ||
	    buffer->data_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	/* Ok, now see if we can read a complete object. */
	hdr = (struct binder_object_header *)(buffer->data + offset);
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}

/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b: binder_buffer containing the object
 * @index: index in offset array at which the binder_buffer_object is
 *         located
 * @start: points to the start of the offset array
 * @num_valid: the number of valid offsets in the offset array
 *
 * Return: If @index is within the valid range of the offset array
 *         described by @start and @num_valid, and if there's a valid
 *         binder_buffer_object at the offset found in index @index
 *         of the offset array, that object is returned. Otherwise,
 *         %NULL is returned.
 *         Note that the offset found in index @index itself is not
 *         verified; this function assumes that @num_valid elements
 *         from @start were previously verified to have valid offsets.
 */
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
							binder_size_t index,
							binder_size_t *start,
							binder_size_t num_valid)
{
	struct binder_buffer_object *buffer_obj;
	binder_size_t *offp;

	if (index >= num_valid)
		return NULL;

	offp = start + index;
	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
		return NULL;

	return buffer_obj;
}

/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b: transaction buffer
 * @objects_start: start of objects buffer
 * @buffer: binder_buffer_object in which to fix up
 * @offset: start offset in @buffer to fix up
 * @last_obj: last binder_buffer_object that we fixed up in
 * @last_min_offset: minimum fixup offset in @last_obj
 *
 * Return: %true if a fixup in buffer @buffer at offset @offset is
 *         allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_buffer *b,
				  binder_size_t *objects_start,
				  struct binder_buffer_object *buffer,
				  binder_size_t fixup_offset,
				  struct binder_buffer_object *last_obj,
				  binder_size_t last_min_offset)
{
	if (!last_obj) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj != buffer) {
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
		last_obj = (struct binder_buffer_object *)
			(b->data + *(objects_start + last_obj->parent));
	}
	return (fixup_offset >= last_min_offset);
}

355b0502
GKH
1595static void binder_transaction_buffer_release(struct binder_proc *proc,
1596 struct binder_buffer *buffer,
da49889d 1597 binder_size_t *failed_at)
355b0502 1598{
dd9bc4f9 1599 binder_size_t *offp, *off_start, *off_end;
355b0502
GKH
1600 int debug_id = buffer->debug_id;
1601
1602 binder_debug(BINDER_DEBUG_TRANSACTION,
56b468fc 1603 "%d buffer release %d, size %zd-%zd, failed at %p\n",
355b0502
GKH
1604 proc->pid, buffer->debug_id,
1605 buffer->data_size, buffer->offsets_size, failed_at);
1606
1607 if (buffer->target_node)
1608 binder_dec_node(buffer->target_node, 1, 0);
1609
dd9bc4f9
MC
1610 off_start = (binder_size_t *)(buffer->data +
1611 ALIGN(buffer->data_size, sizeof(void *)));
355b0502
GKH
1612 if (failed_at)
1613 off_end = failed_at;
1614 else
dd9bc4f9
MC
1615 off_end = (void *)off_start + buffer->offsets_size;
1616 for (offp = off_start; offp < off_end; offp++) {
ce0c6598
MC
1617 struct binder_object_header *hdr;
1618 size_t object_size = binder_validate_object(buffer, *offp);
10f62861 1619
ce0c6598
MC
1620 if (object_size == 0) {
1621 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
da49889d 1622 debug_id, (u64)*offp, buffer->data_size);
355b0502
GKH
1623 continue;
1624 }
ce0c6598
MC
1625 hdr = (struct binder_object_header *)(buffer->data + *offp);
1626 switch (hdr->type) {
355b0502
GKH
1627 case BINDER_TYPE_BINDER:
1628 case BINDER_TYPE_WEAK_BINDER: {
ce0c6598
MC
1629 struct flat_binder_object *fp;
1630 struct binder_node *node;
10f62861 1631
ce0c6598
MC
1632 fp = to_flat_binder_object(hdr);
1633 node = binder_get_node(proc, fp->binder);
355b0502 1634 if (node == NULL) {
da49889d
AH
1635 pr_err("transaction release %d bad node %016llx\n",
1636 debug_id, (u64)fp->binder);
355b0502
GKH
1637 break;
1638 }
1639 binder_debug(BINDER_DEBUG_TRANSACTION,
da49889d
AH
1640 " node %d u%016llx\n",
1641 node->debug_id, (u64)node->ptr);
ce0c6598
MC
1642 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
1643 0);
96dd75d9 1644 binder_put_node(node);
355b0502
GKH
1645 } break;
1646 case BINDER_TYPE_HANDLE:
1647 case BINDER_TYPE_WEAK_HANDLE: {
ce0c6598 1648 struct flat_binder_object *fp;
f7d87412
TK
1649 struct binder_ref_data rdata;
1650 int ret;
10f62861 1651
ce0c6598 1652 fp = to_flat_binder_object(hdr);
f7d87412
TK
1653 ret = binder_dec_ref_for_handle(proc, fp->handle,
1654 hdr->type == BINDER_TYPE_HANDLE, &rdata);
1655
1656 if (ret) {
1657 pr_err("transaction release %d bad handle %d, ret = %d\n",
1658 debug_id, fp->handle, ret);
355b0502
GKH
1659 break;
1660 }
1661 binder_debug(BINDER_DEBUG_TRANSACTION,
f7d87412
TK
1662 " ref %d desc %d\n",
1663 rdata.debug_id, rdata.desc);
355b0502
GKH
1664 } break;
1665
ce0c6598
MC
1666 case BINDER_TYPE_FD: {
1667 struct binder_fd_object *fp = to_binder_fd_object(hdr);
1668
355b0502 1669 binder_debug(BINDER_DEBUG_TRANSACTION,
ce0c6598 1670 " fd %d\n", fp->fd);
355b0502 1671 if (failed_at)
ce0c6598
MC
1672 task_close_fd(proc, fp->fd);
1673 } break;
dd9bc4f9
MC
1674 case BINDER_TYPE_PTR:
1675 /*
1676 * Nothing to do here, this will get cleaned up when the
1677 * transaction buffer gets freed
1678 */
355b0502 1679 break;
e124de38
MC
1680 case BINDER_TYPE_FDA: {
1681 struct binder_fd_array_object *fda;
1682 struct binder_buffer_object *parent;
1683 uintptr_t parent_buffer;
1684 u32 *fd_array;
1685 size_t fd_index;
1686 binder_size_t fd_buf_size;
1687
1688 fda = to_binder_fd_array_object(hdr);
1689 parent = binder_validate_ptr(buffer, fda->parent,
1690 off_start,
1691 offp - off_start);
1692 if (!parent) {
1693 pr_err("transaction release %d bad parent offset",
1694 debug_id);
1695 continue;
1696 }
1697 /*
1698 * Since the parent was already fixed up, convert it
1699 * back to kernel address space to access it
1700 */
1701 parent_buffer = parent->buffer -
467545d8
TK
1702 binder_alloc_get_user_buffer_offset(
1703 &proc->alloc);
e124de38
MC
1704
1705 fd_buf_size = sizeof(u32) * fda->num_fds;
1706 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
1707 pr_err("transaction release %d invalid number of fds (%lld)\n",
1708 debug_id, (u64)fda->num_fds);
1709 continue;
1710 }
1711 if (fd_buf_size > parent->length ||
1712 fda->parent_offset > parent->length - fd_buf_size) {
1713 /* No space for all file descriptors here. */
1714 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
1715 debug_id, (u64)fda->num_fds);
1716 continue;
1717 }
1718 fd_array = (u32 *)(parent_buffer + fda->parent_offset);
1719 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
1720 task_close_fd(proc, fd_array[fd_index]);
1721 } break;
355b0502 1722 default:
64dcfe6b 1723 pr_err("transaction release %d bad object type %x\n",
ce0c6598 1724 debug_id, hdr->type);
355b0502
GKH
1725 break;
1726 }
1727 }
1728}
1729
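
The loop above walks an offsets array whose position is derived, not stored: object data comes first, then the offsets array, aligned up to a pointer boundary. A minimal sketch of that arithmetic (editorial illustration, not driver code; buf, data_size and offsets_size stand in for the binder_buffer fields, and uint64_t for binder_size_t in the 64-bit configuration):

#include <stddef.h>
#include <stdint.h>

/* Mirrors the kernel's ALIGN() for this sketch. */
#define PTR_ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

/* Compute the bounds of the offsets array that the release loop walks;
 * each entry is the offset of one flattened object in the data area. */
static void offsets_bounds(uint8_t *buf, size_t data_size,
			   size_t offsets_size,
			   uint64_t **start, uint64_t **end)
{
	uintptr_t p = PTR_ALIGN_UP((uintptr_t)buf + data_size,
				   sizeof(void *));

	*start = (uint64_t *)p;
	*end = (uint64_t *)(p + offsets_size);
}
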
bfd49fea
MC
1730static int binder_translate_binder(struct flat_binder_object *fp,
1731 struct binder_transaction *t,
1732 struct binder_thread *thread)
1733{
1734 struct binder_node *node;
bfd49fea
MC
1735 struct binder_proc *proc = thread->proc;
1736 struct binder_proc *target_proc = t->to_proc;
f7d87412 1737 struct binder_ref_data rdata;
96dd75d9 1738 int ret = 0;
bfd49fea
MC
1739
1740 node = binder_get_node(proc, fp->binder);
1741 if (!node) {
1742 node = binder_new_node(proc, fp->binder, fp->cookie);
1743 if (!node)
1744 return -ENOMEM;
1745
1746 node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
1747 node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
1748 }
1749 if (fp->cookie != node->cookie) {
1750 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
1751 proc->pid, thread->pid, (u64)fp->binder,
1752 node->debug_id, (u64)fp->cookie,
1753 (u64)node->cookie);
96dd75d9
TK
1754 ret = -EINVAL;
1755 goto done;
1756 }
1757 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
1758 ret = -EPERM;
1759 goto done;
bfd49fea 1760 }
bfd49fea 1761
f7d87412
TK
1762 ret = binder_inc_ref_for_node(target_proc, node,
1763 fp->hdr.type == BINDER_TYPE_BINDER,
1764 &thread->todo, &rdata);
1765 if (ret)
96dd75d9 1766 goto done;
bfd49fea
MC
1767
1768 if (fp->hdr.type == BINDER_TYPE_BINDER)
1769 fp->hdr.type = BINDER_TYPE_HANDLE;
1770 else
1771 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
1772 fp->binder = 0;
f7d87412 1773 fp->handle = rdata.desc;
bfd49fea 1774 fp->cookie = 0;
bfd49fea 1775
f7d87412 1776 trace_binder_transaction_node_to_ref(t, node, &rdata);
bfd49fea
MC
1777 binder_debug(BINDER_DEBUG_TRANSACTION,
1778 " node %d u%016llx -> ref %d desc %d\n",
1779 node->debug_id, (u64)node->ptr,
f7d87412 1780 rdata.debug_id, rdata.desc);
96dd75d9
TK
1781done:
1782 binder_put_node(node);
1783 return ret;
bfd49fea
MC
1784}
1785
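
What binder_translate_binder() consumes is laid down by the sender roughly as follows; a hedged user-space sketch using the UAPI types (object and cookie are the sender's own pointers, and the 0x7f priority byte follows servicemanager's convention). After translation, the receiver sees the same bytes with hdr.type rewritten to BINDER_TYPE_HANDLE and fp->handle holding a descriptor valid in its own process:

#include <stdint.h>
#include <string.h>
#include <linux/android/binder.h>

static void init_local_object(struct flat_binder_object *fp,
			      void *object, void *cookie)
{
	memset(fp, 0, sizeof(*fp));
	fp->hdr.type = BINDER_TYPE_BINDER;
	/* low byte: minimum thread priority for incoming calls */
	fp->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
	fp->binder = (uintptr_t)object;
	fp->cookie = (uintptr_t)cookie;
}
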
1786static int binder_translate_handle(struct flat_binder_object *fp,
1787 struct binder_transaction *t,
1788 struct binder_thread *thread)
1789{
bfd49fea
MC
1790 struct binder_proc *proc = thread->proc;
1791 struct binder_proc *target_proc = t->to_proc;
f7d87412
TK
1792 struct binder_node *node;
1793 struct binder_ref_data src_rdata;
96dd75d9 1794 int ret = 0;
bfd49fea 1795
f7d87412
TK
1796 node = binder_get_node_from_ref(proc, fp->handle,
1797 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
1798 if (!node) {
bfd49fea
MC
1799 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
1800 proc->pid, thread->pid, fp->handle);
1801 return -EINVAL;
1802 }
96dd75d9
TK
1803 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
1804 ret = -EPERM;
1805 goto done;
1806 }
bfd49fea 1807
f7d87412 1808 if (node->proc == target_proc) {
bfd49fea
MC
1809 if (fp->hdr.type == BINDER_TYPE_HANDLE)
1810 fp->hdr.type = BINDER_TYPE_BINDER;
1811 else
1812 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
f7d87412
TK
1813 fp->binder = node->ptr;
1814 fp->cookie = node->cookie;
1815 binder_inc_node(node,
1816 fp->hdr.type == BINDER_TYPE_BINDER,
bfd49fea 1817 0, NULL);
f7d87412 1818 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
bfd49fea
MC
1819 binder_debug(BINDER_DEBUG_TRANSACTION,
1820 " ref %d desc %d -> node %d u%016llx\n",
f7d87412
TK
1821 src_rdata.debug_id, src_rdata.desc, node->debug_id,
1822 (u64)node->ptr);
bfd49fea 1823 } else {
f7d87412
TK
1824 int ret;
1825 struct binder_ref_data dest_rdata;
bfd49fea 1826
f7d87412
TK
1827 ret = binder_inc_ref_for_node(target_proc, node,
1828 fp->hdr.type == BINDER_TYPE_HANDLE,
1829 NULL, &dest_rdata);
1830 if (ret)
96dd75d9 1831 goto done;
bfd49fea
MC
1832
1833 fp->binder = 0;
f7d87412 1834 fp->handle = dest_rdata.desc;
bfd49fea 1835 fp->cookie = 0;
f7d87412
TK
1836 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
1837 &dest_rdata);
bfd49fea
MC
1838 binder_debug(BINDER_DEBUG_TRANSACTION,
1839 " ref %d desc %d -> ref %d desc %d (node %d)\n",
f7d87412
TK
1840 src_rdata.debug_id, src_rdata.desc,
1841 dest_rdata.debug_id, dest_rdata.desc,
1842 node->debug_id);
bfd49fea 1843 }
96dd75d9
TK
1844done:
1845 binder_put_node(node);
1846 return ret;
bfd49fea
MC
1847}
1848
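
The branch structure above encodes a single rule: a handle travelling back into the process that owns the node collapses to the original binder/cookie pair, while a handle forwarded anywhere else becomes a (possibly new) reference in the target's ref tree. In miniature (an editorial sketch, not driver code):

enum handle_fixup { FIXUP_TO_BINDER, FIXUP_TO_HANDLE };

/* node_owner: the process that published the node;
 * target: the process receiving the transaction. */
static enum handle_fixup choose_fixup(const void *node_owner,
				      const void *target)
{
	return node_owner == target ? FIXUP_TO_BINDER : FIXUP_TO_HANDLE;
}
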
1849static int binder_translate_fd(int fd,
1850 struct binder_transaction *t,
1851 struct binder_thread *thread,
1852 struct binder_transaction *in_reply_to)
1853{
1854 struct binder_proc *proc = thread->proc;
1855 struct binder_proc *target_proc = t->to_proc;
1856 int target_fd;
1857 struct file *file;
1858 int ret;
1859 bool target_allows_fd;
1860
1861 if (in_reply_to)
1862 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
1863 else
1864 target_allows_fd = t->buffer->target_node->accept_fds;
1865 if (!target_allows_fd) {
1866 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
1867 proc->pid, thread->pid,
1868 in_reply_to ? "reply" : "transaction",
1869 fd);
1870 ret = -EPERM;
1871 goto err_fd_not_accepted;
1872 }
1873
1874 file = fget(fd);
1875 if (!file) {
1876 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
1877 proc->pid, thread->pid, fd);
1878 ret = -EBADF;
1879 goto err_fget;
1880 }
1881 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
1882 if (ret < 0) {
1883 ret = -EPERM;
1884 goto err_security;
1885 }
1886
1887 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
1888 if (target_fd < 0) {
1889 ret = -ENOMEM;
1890 goto err_get_unused_fd;
1891 }
1892 task_fd_install(target_proc, target_fd, file);
1893 trace_binder_transaction_fd(t, fd, target_fd);
1894 binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
1895 fd, target_fd);
1896
1897 return target_fd;
1898
1899err_get_unused_fd:
1900err_security:
1901 fput(file);
1902err_fget:
1903err_fd_not_accepted:
1904 return ret;
1905}
1906
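
The sender-side payload for this path, as a hedged user-space sketch (UAPI names assumed): the driver installs a dup of the descriptor in the target with O_CLOEXEC and rewrites fp->fd in place, so the receiver reads the translated value from the same object:

#include <string.h>
#include <linux/android/binder.h>

static void init_fd_object(struct binder_fd_object *fp, int fd)
{
	memset(fp, 0, sizeof(*fp));
	fp->hdr.type = BINDER_TYPE_FD;
	fp->fd = fd;	/* replaced with the target's fd on delivery */
}
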
e124de38
MC
1907static int binder_translate_fd_array(struct binder_fd_array_object *fda,
1908 struct binder_buffer_object *parent,
1909 struct binder_transaction *t,
1910 struct binder_thread *thread,
1911 struct binder_transaction *in_reply_to)
1912{
1913 binder_size_t fdi, fd_buf_size, num_installed_fds;
1914 int target_fd;
1915 uintptr_t parent_buffer;
1916 u32 *fd_array;
1917 struct binder_proc *proc = thread->proc;
1918 struct binder_proc *target_proc = t->to_proc;
1919
1920 fd_buf_size = sizeof(u32) * fda->num_fds;
1921 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
1922 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
1923 proc->pid, thread->pid, (u64)fda->num_fds);
1924 return -EINVAL;
1925 }
1926 if (fd_buf_size > parent->length ||
1927 fda->parent_offset > parent->length - fd_buf_size) {
1928 /* No space for all file descriptors here. */
1929 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
1930 proc->pid, thread->pid, (u64)fda->num_fds);
1931 return -EINVAL;
1932 }
1933 /*
1934 * Since the parent was already fixed up, convert it
1935 * back to the kernel address space to access it
1936 */
467545d8
TK
1937 parent_buffer = parent->buffer -
1938 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
e124de38
MC
1939 fd_array = (u32 *)(parent_buffer + fda->parent_offset);
1940 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
1941 binder_user_error("%d:%d parent offset not aligned correctly.\n",
1942 proc->pid, thread->pid);
1943 return -EINVAL;
1944 }
1945 for (fdi = 0; fdi < fda->num_fds; fdi++) {
1946 target_fd = binder_translate_fd(fd_array[fdi], t, thread,
1947 in_reply_to);
1948 if (target_fd < 0)
1949 goto err_translate_fd_failed;
1950 fd_array[fdi] = target_fd;
1951 }
1952 return 0;
1953
1954err_translate_fd_failed:
1955 /*
 1956	 * Failed to allocate an fd or hit a security error;
 1957	 * free the fds installed so far.
1958 */
1959 num_installed_fds = fdi;
1960 for (fdi = 0; fdi < num_installed_fds; fdi++)
1961 task_close_fd(target_proc, fd_array[fdi]);
1962 return target_fd;
1963}
1964
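
The two guards at the top of binder_translate_fd_array() form an overflow-safe containment check. Isolated for clarity (editorial sketch, with uint64_t standing in for binder_size_t):

#include <stdbool.h>
#include <stdint.h>

/* Does [offset, offset + n * sizeof(u32)) fit inside a parent buffer of
 * length len? The multiplication is guarded first; the subtracted form
 * "offset <= len - size" avoids overflowing "offset + size". */
static bool fd_array_fits(uint64_t n, uint64_t offset, uint64_t len)
{
	uint64_t size;

	if (n >= UINT64_MAX / sizeof(uint32_t))
		return false;
	size = n * sizeof(uint32_t);
	return size <= len && offset <= len - size;
}
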
dd9bc4f9
MC
1965static int binder_fixup_parent(struct binder_transaction *t,
1966 struct binder_thread *thread,
1967 struct binder_buffer_object *bp,
1968 binder_size_t *off_start,
1969 binder_size_t num_valid,
1970 struct binder_buffer_object *last_fixup_obj,
1971 binder_size_t last_fixup_min_off)
1972{
1973 struct binder_buffer_object *parent;
1974 u8 *parent_buffer;
1975 struct binder_buffer *b = t->buffer;
1976 struct binder_proc *proc = thread->proc;
1977 struct binder_proc *target_proc = t->to_proc;
1978
1979 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
1980 return 0;
1981
1982 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
1983 if (!parent) {
1984 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
1985 proc->pid, thread->pid);
1986 return -EINVAL;
1987 }
1988
1989 if (!binder_validate_fixup(b, off_start,
1990 parent, bp->parent_offset,
1991 last_fixup_obj,
1992 last_fixup_min_off)) {
1993 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
1994 proc->pid, thread->pid);
1995 return -EINVAL;
1996 }
1997
1998 if (parent->length < sizeof(binder_uintptr_t) ||
1999 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2000 /* No space for a pointer here! */
2001 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2002 proc->pid, thread->pid);
2003 return -EINVAL;
2004 }
2005 parent_buffer = (u8 *)(parent->buffer -
467545d8
TK
2006 binder_alloc_get_user_buffer_offset(
2007 &target_proc->alloc));
dd9bc4f9
MC
2008 *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
2009
2010 return 0;
2011}
2012
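
The final store in binder_fixup_parent() is the whole point of the function: patch the child buffer's relocated address into its parent at the sender-specified offset. In isolation (editorial sketch; memcpy replaces the driver's direct store only to sidestep alignment concerns):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* parent_kaddr: kernel mapping of the parent buffer object;
 * child_uaddr: the child buffer's address in the target process. */
static void write_parent_fixup(uint8_t *parent_kaddr, size_t parent_offset,
			       uint64_t child_uaddr)
{
	memcpy(parent_kaddr + parent_offset, &child_uaddr,
	       sizeof(child_uaddr));
}
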
355b0502
GKH
2013static void binder_transaction(struct binder_proc *proc,
2014 struct binder_thread *thread,
843a2578
MC
2015 struct binder_transaction_data *tr, int reply,
2016 binder_size_t extra_buffers_size)
355b0502 2017{
bfd49fea 2018 int ret;
355b0502
GKH
2019 struct binder_transaction *t;
2020 struct binder_work *tcomplete;
dd9bc4f9 2021 binder_size_t *offp, *off_end, *off_start;
52354e59 2022 binder_size_t off_min;
dd9bc4f9 2023 u8 *sg_bufp, *sg_buf_end;
e482ec39 2024 struct binder_proc *target_proc = NULL;
355b0502
GKH
2025 struct binder_thread *target_thread = NULL;
2026 struct binder_node *target_node = NULL;
2027 struct list_head *target_list;
2028 wait_queue_head_t *target_wait;
2029 struct binder_transaction *in_reply_to = NULL;
2030 struct binder_transaction_log_entry *e;
0a0fdc1f
TK
2031 uint32_t return_error = 0;
2032 uint32_t return_error_param = 0;
2033 uint32_t return_error_line = 0;
dd9bc4f9
MC
2034 struct binder_buffer_object *last_fixup_obj = NULL;
2035 binder_size_t last_fixup_min_off = 0;
803df563 2036 struct binder_context *context = proc->context;
0f32aeb3 2037 int t_debug_id = atomic_inc_return(&binder_last_id);
355b0502 2038
ec49bb00 2039 e = binder_transaction_log_add(&binder_transaction_log);
0f32aeb3 2040 e->debug_id = t_debug_id;
355b0502
GKH
2041 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2042 e->from_proc = proc->pid;
2043 e->from_thread = thread->pid;
2044 e->target_handle = tr->target.handle;
2045 e->data_size = tr->data_size;
2046 e->offsets_size = tr->offsets_size;
8b980bee 2047 e->context_name = proc->context->name;
355b0502
GKH
2048
2049 if (reply) {
2050 in_reply_to = thread->transaction_stack;
2051 if (in_reply_to == NULL) {
56b468fc 2052 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
355b0502
GKH
2053 proc->pid, thread->pid);
2054 return_error = BR_FAILED_REPLY;
0a0fdc1f
TK
2055 return_error_param = -EPROTO;
2056 return_error_line = __LINE__;
355b0502
GKH
2057 goto err_empty_call_stack;
2058 }
2059 binder_set_nice(in_reply_to->saved_priority);
2060 if (in_reply_to->to_thread != thread) {
e482ec39 2061 spin_lock(&in_reply_to->lock);
56b468fc 2062 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
355b0502
GKH
2063 proc->pid, thread->pid, in_reply_to->debug_id,
2064 in_reply_to->to_proc ?
2065 in_reply_to->to_proc->pid : 0,
2066 in_reply_to->to_thread ?
2067 in_reply_to->to_thread->pid : 0);
e482ec39 2068 spin_unlock(&in_reply_to->lock);
355b0502 2069 return_error = BR_FAILED_REPLY;
0a0fdc1f
TK
2070 return_error_param = -EPROTO;
2071 return_error_line = __LINE__;
355b0502
GKH
2072 in_reply_to = NULL;
2073 goto err_bad_call_stack;
2074 }
2075 thread->transaction_stack = in_reply_to->to_parent;
e482ec39 2076 target_thread = binder_get_txn_from(in_reply_to);
355b0502
GKH
2077 if (target_thread == NULL) {
2078 return_error = BR_DEAD_REPLY;
0a0fdc1f 2079 return_error_line = __LINE__;
355b0502
GKH
2080 goto err_dead_binder;
2081 }
2082 if (target_thread->transaction_stack != in_reply_to) {
56b468fc 2083 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
355b0502
GKH
2084 proc->pid, thread->pid,
2085 target_thread->transaction_stack ?
2086 target_thread->transaction_stack->debug_id : 0,
2087 in_reply_to->debug_id);
2088 return_error = BR_FAILED_REPLY;
0a0fdc1f
TK
2089 return_error_param = -EPROTO;
2090 return_error_line = __LINE__;
355b0502
GKH
2091 in_reply_to = NULL;
2092 target_thread = NULL;
2093 goto err_dead_binder;
2094 }
2095 target_proc = target_thread->proc;
e482ec39 2096 target_proc->tmp_ref++;
355b0502
GKH
2097 } else {
2098 if (tr->target.handle) {
2099 struct binder_ref *ref;
10f62861 2100
f80cbc72
TK
2101 /*
2102 * There must already be a strong ref
2103 * on this node. If so, do a strong
2104 * increment on the node to ensure it
2105 * stays alive until the transaction is
2106 * done.
2107 */
b81f4c5f 2108 ref = binder_get_ref(proc, tr->target.handle, true);
f80cbc72
TK
2109 if (ref) {
2110 binder_inc_node(ref->node, 1, 0, NULL);
2111 target_node = ref->node;
2112 }
2113 if (target_node == NULL) {
56b468fc 2114 binder_user_error("%d:%d got transaction to invalid handle\n",
355b0502
GKH
2115 proc->pid, thread->pid);
2116 return_error = BR_FAILED_REPLY;
0a0fdc1f
TK
2117 return_error_param = -EINVAL;
2118 return_error_line = __LINE__;
355b0502
GKH
2119 goto err_invalid_target_handle;
2120 }
355b0502 2121 } else {
3490fdcb 2122 mutex_lock(&context->context_mgr_node_lock);
803df563 2123 target_node = context->binder_context_mgr_node;
355b0502
GKH
2124 if (target_node == NULL) {
2125 return_error = BR_DEAD_REPLY;
3490fdcb 2126 mutex_unlock(&context->context_mgr_node_lock);
0a0fdc1f 2127 return_error_line = __LINE__;
355b0502
GKH
2128 goto err_no_context_mgr_node;
2129 }
f80cbc72 2130 binder_inc_node(target_node, 1, 0, NULL);
3490fdcb 2131 mutex_unlock(&context->context_mgr_node_lock);
355b0502
GKH
2132 }
2133 e->to_node = target_node->debug_id;
2134 target_proc = target_node->proc;
2135 if (target_proc == NULL) {
2136 return_error = BR_DEAD_REPLY;
0a0fdc1f 2137 return_error_line = __LINE__;
355b0502
GKH
2138 goto err_dead_binder;
2139 }
e482ec39 2140 target_proc->tmp_ref++;
79af7307
SS
2141 if (security_binder_transaction(proc->tsk,
2142 target_proc->tsk) < 0) {
2143 return_error = BR_FAILED_REPLY;
0a0fdc1f
TK
2144 return_error_param = -EPERM;
2145 return_error_line = __LINE__;
79af7307
SS
2146 goto err_invalid_target_handle;
2147 }
355b0502
GKH
2148 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2149 struct binder_transaction *tmp;
10f62861 2150
355b0502
GKH
2151 tmp = thread->transaction_stack;
2152 if (tmp->to_thread != thread) {
e482ec39 2153 spin_lock(&tmp->lock);
56b468fc 2154 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
355b0502
GKH
2155 proc->pid, thread->pid, tmp->debug_id,
2156 tmp->to_proc ? tmp->to_proc->pid : 0,
2157 tmp->to_thread ?
2158 tmp->to_thread->pid : 0);
e482ec39 2159 spin_unlock(&tmp->lock);
355b0502 2160 return_error = BR_FAILED_REPLY;
0a0fdc1f
TK
2161 return_error_param = -EPROTO;
2162 return_error_line = __LINE__;
355b0502
GKH
2163 goto err_bad_call_stack;
2164 }
2165 while (tmp) {
e482ec39
TK
2166 struct binder_thread *from;
2167
2168 spin_lock(&tmp->lock);
2169 from = tmp->from;
2170 if (from && from->proc == target_proc) {
2171 atomic_inc(&from->tmp_ref);
2172 target_thread = from;
2173 spin_unlock(&tmp->lock);
2174 break;
2175 }
2176 spin_unlock(&tmp->lock);
355b0502
GKH
2177 tmp = tmp->from_parent;
2178 }
2179 }
2180 }
2181 if (target_thread) {
2182 e->to_thread = target_thread->pid;
2183 target_list = &target_thread->todo;
2184 target_wait = &target_thread->wait;
2185 } else {
2186 target_list = &target_proc->todo;
2187 target_wait = &target_proc->wait;
2188 }
2189 e->to_proc = target_proc->pid;
2190
2191 /* TODO: reuse incoming transaction for reply */
2192 t = kzalloc(sizeof(*t), GFP_KERNEL);
2193 if (t == NULL) {
2194 return_error = BR_FAILED_REPLY;
0a0fdc1f
TK
2195 return_error_param = -ENOMEM;
2196 return_error_line = __LINE__;
355b0502
GKH
2197 goto err_alloc_t_failed;
2198 }
2199 binder_stats_created(BINDER_STAT_TRANSACTION);
e482ec39 2200 spin_lock_init(&t->lock);
355b0502
GKH
2201
2202 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2203 if (tcomplete == NULL) {
2204 return_error = BR_FAILED_REPLY;
0a0fdc1f
TK
2205 return_error_param = -ENOMEM;
2206 return_error_line = __LINE__;
355b0502
GKH
2207 goto err_alloc_tcomplete_failed;
2208 }
2209 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2210
0f32aeb3 2211 t->debug_id = t_debug_id;
355b0502
GKH
2212
2213 if (reply)
2214 binder_debug(BINDER_DEBUG_TRANSACTION,
843a2578 2215 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
355b0502
GKH
2216 proc->pid, thread->pid, t->debug_id,
2217 target_proc->pid, target_thread->pid,
da49889d
AH
2218 (u64)tr->data.ptr.buffer,
2219 (u64)tr->data.ptr.offsets,
843a2578
MC
2220 (u64)tr->data_size, (u64)tr->offsets_size,
2221 (u64)extra_buffers_size);
355b0502
GKH
2222 else
2223 binder_debug(BINDER_DEBUG_TRANSACTION,
843a2578 2224 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
355b0502
GKH
2225 proc->pid, thread->pid, t->debug_id,
2226 target_proc->pid, target_node->debug_id,
da49889d
AH
2227 (u64)tr->data.ptr.buffer,
2228 (u64)tr->data.ptr.offsets,
843a2578
MC
2229 (u64)tr->data_size, (u64)tr->offsets_size,
2230 (u64)extra_buffers_size);
355b0502
GKH
2231
2232 if (!reply && !(tr->flags & TF_ONE_WAY))
2233 t->from = thread;
2234 else
2235 t->from = NULL;
57bab7cb 2236 t->sender_euid = task_euid(proc->tsk);
355b0502
GKH
2237 t->to_proc = target_proc;
2238 t->to_thread = target_thread;
2239 t->code = tr->code;
2240 t->flags = tr->flags;
2241 t->priority = task_nice(current);
975a1ac9
AH
2242
2243 trace_binder_transaction(reply, t, target_node);
2244
467545d8 2245 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
843a2578
MC
2246 tr->offsets_size, extra_buffers_size,
2247 !reply && (t->flags & TF_ONE_WAY));
0a0fdc1f
TK
2248 if (IS_ERR(t->buffer)) {
2249 /*
2250 * -ESRCH indicates VMA cleared. The target is dying.
2251 */
2252 return_error_param = PTR_ERR(t->buffer);
2253 return_error = return_error_param == -ESRCH ?
2254 BR_DEAD_REPLY : BR_FAILED_REPLY;
2255 return_error_line = __LINE__;
2256 t->buffer = NULL;
355b0502
GKH
2257 goto err_binder_alloc_buf_failed;
2258 }
2259 t->buffer->allow_user_free = 0;
2260 t->buffer->debug_id = t->debug_id;
2261 t->buffer->transaction = t;
2262 t->buffer->target_node = target_node;
975a1ac9 2263 trace_binder_transaction_alloc_buf(t->buffer);
dd9bc4f9
MC
2264 off_start = (binder_size_t *)(t->buffer->data +
2265 ALIGN(tr->data_size, sizeof(void *)));
2266 offp = off_start;
355b0502 2267
da49889d
AH
2268 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
2269 tr->data.ptr.buffer, tr->data_size)) {
56b468fc
AS
2270 binder_user_error("%d:%d got transaction with invalid data ptr\n",
2271 proc->pid, thread->pid);
355b0502 2272 return_error = BR_FAILED_REPLY;
0a0fdc1f
TK
2273 return_error_param = -EFAULT;
2274 return_error_line = __LINE__;
355b0502
GKH
2275 goto err_copy_data_failed;
2276 }
da49889d
AH
2277 if (copy_from_user(offp, (const void __user *)(uintptr_t)
2278 tr->data.ptr.offsets, tr->offsets_size)) {
56b468fc
AS
2279 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2280 proc->pid, thread->pid);
355b0502 2281 return_error = BR_FAILED_REPLY;
0a0fdc1f
TK
2282 return_error_param = -EFAULT;
2283 return_error_line = __LINE__;
355b0502
GKH
2284 goto err_copy_data_failed;
2285 }
da49889d
AH
2286 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
2287 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2288 proc->pid, thread->pid, (u64)tr->offsets_size);
355b0502 2289 return_error = BR_FAILED_REPLY;
0a0fdc1f
TK
2290 return_error_param = -EINVAL;
2291 return_error_line = __LINE__;
355b0502
GKH
2292 goto err_bad_offset;
2293 }
dd9bc4f9
MC
2294 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
2295 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
2296 proc->pid, thread->pid,
df3087d4 2297 (u64)extra_buffers_size);
dd9bc4f9 2298 return_error = BR_FAILED_REPLY;
0a0fdc1f
TK
2299 return_error_param = -EINVAL;
2300 return_error_line = __LINE__;
dd9bc4f9
MC
2301 goto err_bad_offset;
2302 }
2303 off_end = (void *)off_start + tr->offsets_size;
2304 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
2305 sg_buf_end = sg_bufp + extra_buffers_size;
52354e59 2306 off_min = 0;
355b0502 2307 for (; offp < off_end; offp++) {
ce0c6598
MC
2308 struct binder_object_header *hdr;
2309 size_t object_size = binder_validate_object(t->buffer, *offp);
10f62861 2310
ce0c6598
MC
2311 if (object_size == 0 || *offp < off_min) {
2312 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
52354e59
AH
2313 proc->pid, thread->pid, (u64)*offp,
2314 (u64)off_min,
ce0c6598 2315 (u64)t->buffer->data_size);
355b0502 2316 return_error = BR_FAILED_REPLY;
0a0fdc1f
TK
2317 return_error_param = -EINVAL;
2318 return_error_line = __LINE__;
355b0502
GKH
2319 goto err_bad_offset;
2320 }
ce0c6598
MC
2321
2322 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
2323 off_min = *offp + object_size;
2324 switch (hdr->type) {
355b0502
GKH
2325 case BINDER_TYPE_BINDER:
2326 case BINDER_TYPE_WEAK_BINDER: {
ce0c6598 2327 struct flat_binder_object *fp;
10f62861 2328
ce0c6598 2329 fp = to_flat_binder_object(hdr);
bfd49fea
MC
2330 ret = binder_translate_binder(fp, t, thread);
2331 if (ret < 0) {
79af7307 2332 return_error = BR_FAILED_REPLY;
0a0fdc1f
TK
2333 return_error_param = ret;
2334 return_error_line = __LINE__;
bfd49fea 2335 goto err_translate_failed;
79af7307 2336 }
355b0502
GKH
2337 } break;
2338 case BINDER_TYPE_HANDLE:
2339 case BINDER_TYPE_WEAK_HANDLE: {
ce0c6598 2340 struct flat_binder_object *fp;
10f62861 2341
ce0c6598 2342 fp = to_flat_binder_object(hdr);
bfd49fea
MC
2343 ret = binder_translate_handle(fp, t, thread);
2344 if (ret < 0) {
79af7307 2345 return_error = BR_FAILED_REPLY;
0a0fdc1f
TK
2346 return_error_param = ret;
2347 return_error_line = __LINE__;
bfd49fea 2348 goto err_translate_failed;
355b0502
GKH
2349 }
2350 } break;
2351
2352 case BINDER_TYPE_FD: {
ce0c6598 2353 struct binder_fd_object *fp = to_binder_fd_object(hdr);
bfd49fea
MC
2354 int target_fd = binder_translate_fd(fp->fd, t, thread,
2355 in_reply_to);
355b0502 2356
355b0502 2357 if (target_fd < 0) {
355b0502 2358 return_error = BR_FAILED_REPLY;
0a0fdc1f
TK
2359 return_error_param = target_fd;
2360 return_error_line = __LINE__;
bfd49fea 2361 goto err_translate_failed;
355b0502 2362 }
ce0c6598
MC
2363 fp->pad_binder = 0;
2364 fp->fd = target_fd;
355b0502 2365 } break;
e124de38
MC
2366 case BINDER_TYPE_FDA: {
2367 struct binder_fd_array_object *fda =
2368 to_binder_fd_array_object(hdr);
2369 struct binder_buffer_object *parent =
2370 binder_validate_ptr(t->buffer, fda->parent,
2371 off_start,
2372 offp - off_start);
2373 if (!parent) {
2374 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2375 proc->pid, thread->pid);
355b0502 2376 return_error = BR_FAILED_REPLY;
0a0fdc1f
TK
2377 return_error_param = -EINVAL;
2378 return_error_line = __LINE__;
e124de38 2379 goto err_bad_parent;
355b0502 2380 }
e124de38
MC
2381 if (!binder_validate_fixup(t->buffer, off_start,
2382 parent, fda->parent_offset,
2383 last_fixup_obj,
2384 last_fixup_min_off)) {
2385 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2386 proc->pid, thread->pid);
79af7307 2387 return_error = BR_FAILED_REPLY;
0a0fdc1f
TK
2388 return_error_param = -EINVAL;
2389 return_error_line = __LINE__;
e124de38 2390 goto err_bad_parent;
79af7307 2391 }
e124de38
MC
2392 ret = binder_translate_fd_array(fda, parent, t, thread,
2393 in_reply_to);
2394 if (ret < 0) {
355b0502 2395 return_error = BR_FAILED_REPLY;
0a0fdc1f
TK
2396 return_error_param = ret;
2397 return_error_line = __LINE__;
e124de38 2398 goto err_translate_failed;
355b0502 2399 }
e124de38
MC
2400 last_fixup_obj = parent;
2401 last_fixup_min_off =
2402 fda->parent_offset + sizeof(u32) * fda->num_fds;
2403 } break;
dd9bc4f9
MC
2404 case BINDER_TYPE_PTR: {
2405 struct binder_buffer_object *bp =
2406 to_binder_buffer_object(hdr);
2407 size_t buf_left = sg_buf_end - sg_bufp;
2408
2409 if (bp->length > buf_left) {
2410 binder_user_error("%d:%d got transaction with too large buffer\n",
2411 proc->pid, thread->pid);
2412 return_error = BR_FAILED_REPLY;
0a0fdc1f
TK
2413 return_error_param = -EINVAL;
2414 return_error_line = __LINE__;
dd9bc4f9
MC
2415 goto err_bad_offset;
2416 }
2417 if (copy_from_user(sg_bufp,
2418 (const void __user *)(uintptr_t)
2419 bp->buffer, bp->length)) {
2420 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2421 proc->pid, thread->pid);
0a0fdc1f 2422 return_error_param = -EFAULT;
dd9bc4f9 2423 return_error = BR_FAILED_REPLY;
0a0fdc1f 2424 return_error_line = __LINE__;
dd9bc4f9
MC
2425 goto err_copy_data_failed;
2426 }
2427 /* Fixup buffer pointer to target proc address space */
2428 bp->buffer = (uintptr_t)sg_bufp +
467545d8
TK
2429 binder_alloc_get_user_buffer_offset(
2430 &target_proc->alloc);
dd9bc4f9
MC
2431 sg_bufp += ALIGN(bp->length, sizeof(u64));
2432
2433 ret = binder_fixup_parent(t, thread, bp, off_start,
2434 offp - off_start,
2435 last_fixup_obj,
2436 last_fixup_min_off);
2437 if (ret < 0) {
2438 return_error = BR_FAILED_REPLY;
0a0fdc1f
TK
2439 return_error_param = ret;
2440 return_error_line = __LINE__;
dd9bc4f9
MC
2441 goto err_translate_failed;
2442 }
2443 last_fixup_obj = bp;
2444 last_fixup_min_off = 0;
355b0502 2445 } break;
355b0502 2446 default:
64dcfe6b 2447 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
ce0c6598 2448 proc->pid, thread->pid, hdr->type);
355b0502 2449 return_error = BR_FAILED_REPLY;
0a0fdc1f
TK
2450 return_error_param = -EINVAL;
2451 return_error_line = __LINE__;
355b0502
GKH
2452 goto err_bad_object_type;
2453 }
2454 }
6ea60271
TK
2455 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
2456 list_add_tail(&tcomplete->entry, &thread->todo);
2457
355b0502 2458 if (reply) {
e482ec39
TK
2459 if (target_thread->is_dead)
2460 goto err_dead_proc_or_thread;
355b0502
GKH
2461 BUG_ON(t->buffer->async_transaction != 0);
2462 binder_pop_transaction(target_thread, in_reply_to);
16273538 2463 binder_free_transaction(in_reply_to);
355b0502
GKH
2464 } else if (!(t->flags & TF_ONE_WAY)) {
2465 BUG_ON(t->buffer->async_transaction != 0);
2466 t->need_reply = 1;
2467 t->from_parent = thread->transaction_stack;
2468 thread->transaction_stack = t;
e482ec39
TK
2469 if (target_proc->is_dead ||
2470 (target_thread && target_thread->is_dead)) {
2471 binder_pop_transaction(thread, t);
2472 goto err_dead_proc_or_thread;
2473 }
355b0502
GKH
2474 } else {
2475 BUG_ON(target_node == NULL);
2476 BUG_ON(t->buffer->async_transaction != 1);
2477 if (target_node->has_async_transaction) {
2478 target_list = &target_node->async_todo;
2479 target_wait = NULL;
2480 } else
2481 target_node->has_async_transaction = 1;
e482ec39
TK
2482 if (target_proc->is_dead ||
2483 (target_thread && target_thread->is_dead))
2484 goto err_dead_proc_or_thread;
355b0502
GKH
2485 }
2486 t->work.type = BINDER_WORK_TRANSACTION;
2487 list_add_tail(&t->work.entry, target_list);
0cebb407 2488 if (target_wait) {
6ea60271 2489 if (reply || !(tr->flags & TF_ONE_WAY))
0cebb407
RA
2490 wake_up_interruptible_sync(target_wait);
2491 else
2492 wake_up_interruptible(target_wait);
2493 }
e482ec39
TK
2494 if (target_thread)
2495 binder_thread_dec_tmpref(target_thread);
2496 binder_proc_dec_tmpref(target_proc);
0f32aeb3
TK
2497 /*
2498 * write barrier to synchronize with initialization
2499 * of log entry
2500 */
2501 smp_wmb();
2502 WRITE_ONCE(e->debug_id_done, t_debug_id);
355b0502
GKH
2503 return;
2504
e482ec39
TK
2505err_dead_proc_or_thread:
2506 return_error = BR_DEAD_REPLY;
2507 return_error_line = __LINE__;
bfd49fea 2508err_translate_failed:
355b0502
GKH
2509err_bad_object_type:
2510err_bad_offset:
e124de38 2511err_bad_parent:
355b0502 2512err_copy_data_failed:
975a1ac9 2513 trace_binder_transaction_failed_buffer_release(t->buffer);
355b0502 2514 binder_transaction_buffer_release(target_proc, t->buffer, offp);
f80cbc72 2515 target_node = NULL;
355b0502 2516 t->buffer->transaction = NULL;
467545d8 2517 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
355b0502
GKH
2518err_binder_alloc_buf_failed:
2519 kfree(tcomplete);
2520 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
2521err_alloc_tcomplete_failed:
2522 kfree(t);
2523 binder_stats_deleted(BINDER_STAT_TRANSACTION);
2524err_alloc_t_failed:
2525err_bad_call_stack:
2526err_empty_call_stack:
2527err_dead_binder:
2528err_invalid_target_handle:
2529err_no_context_mgr_node:
e482ec39
TK
2530 if (target_thread)
2531 binder_thread_dec_tmpref(target_thread);
2532 if (target_proc)
2533 binder_proc_dec_tmpref(target_proc);
f80cbc72
TK
2534 if (target_node)
2535 binder_dec_node(target_node, 1, 0);
2536
355b0502 2537 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
0a0fdc1f
TK
2538 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
2539 proc->pid, thread->pid, return_error, return_error_param,
2540 (u64)tr->data_size, (u64)tr->offsets_size,
2541 return_error_line);
355b0502
GKH
2542
2543 {
2544 struct binder_transaction_log_entry *fe;
10f62861 2545
0a0fdc1f
TK
2546 e->return_error = return_error;
2547 e->return_error_param = return_error_param;
2548 e->return_error_line = return_error_line;
ec49bb00 2549 fe = binder_transaction_log_add(&binder_transaction_log_failed);
355b0502 2550 *fe = *e;
0f32aeb3
TK
2551 /*
2552 * write barrier to synchronize with initialization
2553 * of log entry
2554 */
2555 smp_wmb();
2556 WRITE_ONCE(e->debug_id_done, t_debug_id);
2557 WRITE_ONCE(fe->debug_id_done, t_debug_id);
355b0502
GKH
2558 }
2559
3a822b33 2560 BUG_ON(thread->return_error.cmd != BR_OK);
355b0502 2561 if (in_reply_to) {
3a822b33
TK
2562 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
2563 list_add_tail(&thread->return_error.work.entry,
2564 &thread->todo);
355b0502 2565 binder_send_failed_reply(in_reply_to, return_error);
3a822b33
TK
2566 } else {
2567 thread->return_error.cmd = return_error;
2568 list_add_tail(&thread->return_error.work.entry,
2569 &thread->todo);
2570 }
355b0502
GKH
2571}
2572
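
For orientation, the user-space half of this path looks roughly as follows; a hedged sketch in the style of servicemanager (fd is an open binder device; error handling and the read half are omitted):

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int send_transaction(int fd, uint32_t handle, uint32_t code,
			    void *data, size_t data_size,
			    binder_size_t *offs, size_t offs_size)
{
	struct {
		uint32_t cmd;
		struct binder_transaction_data tr;
	} __attribute__((packed)) writebuf;
	struct binder_write_read bwr;

	memset(&writebuf, 0, sizeof(writebuf));
	writebuf.cmd = BC_TRANSACTION;	/* dispatched by binder_thread_write() */
	writebuf.tr.target.handle = handle;
	writebuf.tr.code = code;
	writebuf.tr.data_size = data_size;
	writebuf.tr.offsets_size = offs_size;
	writebuf.tr.data.ptr.buffer = (uintptr_t)data;
	writebuf.tr.data.ptr.offsets = (uintptr_t)offs;

	memset(&bwr, 0, sizeof(bwr));
	bwr.write_buffer = (uintptr_t)&writebuf;
	bwr.write_size = sizeof(writebuf);

	return ioctl(fd, BINDER_WRITE_READ, &bwr);
}

A oneway call is the same sketch with TF_ONE_WAY set in writebuf.tr.flags; a reply uses BC_REPLY, which takes the reply branch of binder_transaction() above.
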
fb07ebc3
BP
2573static int binder_thread_write(struct binder_proc *proc,
2574 struct binder_thread *thread,
da49889d
AH
2575 binder_uintptr_t binder_buffer, size_t size,
2576 binder_size_t *consumed)
355b0502
GKH
2577{
2578 uint32_t cmd;
803df563 2579 struct binder_context *context = proc->context;
da49889d 2580 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
355b0502
GKH
2581 void __user *ptr = buffer + *consumed;
2582 void __user *end = buffer + size;
2583
3a822b33 2584 while (ptr < end && thread->return_error.cmd == BR_OK) {
f7d87412
TK
2585 int ret;
2586
355b0502
GKH
2587 if (get_user(cmd, (uint32_t __user *)ptr))
2588 return -EFAULT;
2589 ptr += sizeof(uint32_t);
975a1ac9 2590 trace_binder_command(cmd);
ec49bb00 2591 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
f716ecfc
BJS
2592 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
2593 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
2594 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
355b0502
GKH
2595 }
2596 switch (cmd) {
2597 case BC_INCREFS:
2598 case BC_ACQUIRE:
2599 case BC_RELEASE:
2600 case BC_DECREFS: {
2601 uint32_t target;
355b0502 2602 const char *debug_string;
f7d87412
TK
2603 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
2604 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
2605 struct binder_ref_data rdata;
355b0502
GKH
2606
2607 if (get_user(target, (uint32_t __user *)ptr))
2608 return -EFAULT;
3490fdcb 2609
355b0502 2610 ptr += sizeof(uint32_t);
f7d87412
TK
2611 ret = -1;
2612 if (increment && !target) {
3490fdcb 2613 struct binder_node *ctx_mgr_node;
3490fdcb
TK
2614 mutex_lock(&context->context_mgr_node_lock);
2615 ctx_mgr_node = context->binder_context_mgr_node;
f7d87412
TK
2616 if (ctx_mgr_node)
2617 ret = binder_inc_ref_for_node(
2618 proc, ctx_mgr_node,
2619 strong, NULL, &rdata);
3490fdcb
TK
2620 mutex_unlock(&context->context_mgr_node_lock);
2621 }
f7d87412
TK
2622 if (ret)
2623 ret = binder_update_ref_for_handle(
2624 proc, target, increment, strong,
2625 &rdata);
2626 if (!ret && rdata.desc != target) {
2627 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
2628 proc->pid, thread->pid,
2629 target, rdata.desc);
355b0502
GKH
2630 }
2631 switch (cmd) {
2632 case BC_INCREFS:
2633 debug_string = "IncRefs";
355b0502
GKH
2634 break;
2635 case BC_ACQUIRE:
2636 debug_string = "Acquire";
355b0502
GKH
2637 break;
2638 case BC_RELEASE:
2639 debug_string = "Release";
355b0502
GKH
2640 break;
2641 case BC_DECREFS:
2642 default:
2643 debug_string = "DecRefs";
f7d87412
TK
2644 break;
2645 }
2646 if (ret) {
2647 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
2648 proc->pid, thread->pid, debug_string,
2649 strong, target, ret);
355b0502
GKH
2650 break;
2651 }
2652 binder_debug(BINDER_DEBUG_USER_REFS,
f7d87412
TK
2653 "%d:%d %s ref %d desc %d s %d w %d\n",
2654 proc->pid, thread->pid, debug_string,
2655 rdata.debug_id, rdata.desc, rdata.strong,
2656 rdata.weak);
355b0502
GKH
2657 break;
2658 }
2659 case BC_INCREFS_DONE:
2660 case BC_ACQUIRE_DONE: {
da49889d
AH
2661 binder_uintptr_t node_ptr;
2662 binder_uintptr_t cookie;
355b0502
GKH
2663 struct binder_node *node;
2664
da49889d 2665 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
355b0502 2666 return -EFAULT;
da49889d
AH
2667 ptr += sizeof(binder_uintptr_t);
2668 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
355b0502 2669 return -EFAULT;
da49889d 2670 ptr += sizeof(binder_uintptr_t);
355b0502
GKH
2671 node = binder_get_node(proc, node_ptr);
2672 if (node == NULL) {
da49889d 2673 binder_user_error("%d:%d %s u%016llx no match\n",
355b0502
GKH
2674 proc->pid, thread->pid,
2675 cmd == BC_INCREFS_DONE ?
2676 "BC_INCREFS_DONE" :
2677 "BC_ACQUIRE_DONE",
da49889d 2678 (u64)node_ptr);
355b0502
GKH
2679 break;
2680 }
2681 if (cookie != node->cookie) {
da49889d 2682 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
355b0502
GKH
2683 proc->pid, thread->pid,
2684 cmd == BC_INCREFS_DONE ?
2685 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
da49889d
AH
2686 (u64)node_ptr, node->debug_id,
2687 (u64)cookie, (u64)node->cookie);
96dd75d9 2688 binder_put_node(node);
355b0502
GKH
2689 break;
2690 }
2691 if (cmd == BC_ACQUIRE_DONE) {
2692 if (node->pending_strong_ref == 0) {
56b468fc 2693 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
355b0502
GKH
2694 proc->pid, thread->pid,
2695 node->debug_id);
96dd75d9 2696 binder_put_node(node);
355b0502
GKH
2697 break;
2698 }
2699 node->pending_strong_ref = 0;
2700 } else {
2701 if (node->pending_weak_ref == 0) {
56b468fc 2702 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
355b0502
GKH
2703 proc->pid, thread->pid,
2704 node->debug_id);
96dd75d9 2705 binder_put_node(node);
355b0502
GKH
2706 break;
2707 }
2708 node->pending_weak_ref = 0;
2709 }
2710 binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
2711 binder_debug(BINDER_DEBUG_USER_REFS,
96dd75d9 2712 "%d:%d %s node %d ls %d lw %d tr %d\n",
355b0502
GKH
2713 proc->pid, thread->pid,
2714 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
96dd75d9
TK
2715 node->debug_id, node->local_strong_refs,
2716 node->local_weak_refs, node->tmp_refs);
2717 binder_put_node(node);
355b0502
GKH
2718 break;
2719 }
2720 case BC_ATTEMPT_ACQUIRE:
56b468fc 2721 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
355b0502
GKH
2722 return -EINVAL;
2723 case BC_ACQUIRE_RESULT:
56b468fc 2724 pr_err("BC_ACQUIRE_RESULT not supported\n");
355b0502
GKH
2725 return -EINVAL;
2726
2727 case BC_FREE_BUFFER: {
da49889d 2728 binder_uintptr_t data_ptr;
355b0502
GKH
2729 struct binder_buffer *buffer;
2730
da49889d 2731 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
355b0502 2732 return -EFAULT;
da49889d 2733 ptr += sizeof(binder_uintptr_t);
355b0502 2734
db516584
TK
2735 buffer = binder_alloc_prepare_to_free(&proc->alloc,
2736 data_ptr);
355b0502 2737 if (buffer == NULL) {
da49889d
AH
2738 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
2739 proc->pid, thread->pid, (u64)data_ptr);
355b0502
GKH
2740 break;
2741 }
2742 if (!buffer->allow_user_free) {
da49889d
AH
2743 binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
2744 proc->pid, thread->pid, (u64)data_ptr);
355b0502
GKH
2745 break;
2746 }
2747 binder_debug(BINDER_DEBUG_FREE_BUFFER,
da49889d
AH
2748 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
2749 proc->pid, thread->pid, (u64)data_ptr,
2750 buffer->debug_id,
355b0502
GKH
2751 buffer->transaction ? "active" : "finished");
2752
2753 if (buffer->transaction) {
2754 buffer->transaction->buffer = NULL;
2755 buffer->transaction = NULL;
2756 }
2757 if (buffer->async_transaction && buffer->target_node) {
2758 BUG_ON(!buffer->target_node->has_async_transaction);
2759 if (list_empty(&buffer->target_node->async_todo))
2760 buffer->target_node->has_async_transaction = 0;
2761 else
2762 list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
2763 }
975a1ac9 2764 trace_binder_transaction_buffer_release(buffer);
355b0502 2765 binder_transaction_buffer_release(proc, buffer, NULL);
467545d8 2766 binder_alloc_free_buf(&proc->alloc, buffer);
355b0502
GKH
2767 break;
2768 }
2769
dd9bc4f9
MC
2770 case BC_TRANSACTION_SG:
2771 case BC_REPLY_SG: {
2772 struct binder_transaction_data_sg tr;
2773
2774 if (copy_from_user(&tr, ptr, sizeof(tr)))
2775 return -EFAULT;
2776 ptr += sizeof(tr);
2777 binder_transaction(proc, thread, &tr.transaction_data,
2778 cmd == BC_REPLY_SG, tr.buffers_size);
2779 break;
2780 }
355b0502
GKH
2781 case BC_TRANSACTION:
2782 case BC_REPLY: {
2783 struct binder_transaction_data tr;
2784
2785 if (copy_from_user(&tr, ptr, sizeof(tr)))
2786 return -EFAULT;
2787 ptr += sizeof(tr);
843a2578
MC
2788 binder_transaction(proc, thread, &tr,
2789 cmd == BC_REPLY, 0);
355b0502
GKH
2790 break;
2791 }
2792
2793 case BC_REGISTER_LOOPER:
2794 binder_debug(BINDER_DEBUG_THREADS,
56b468fc 2795 "%d:%d BC_REGISTER_LOOPER\n",
355b0502
GKH
2796 proc->pid, thread->pid);
2797 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
2798 thread->looper |= BINDER_LOOPER_STATE_INVALID;
56b468fc 2799 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
355b0502
GKH
2800 proc->pid, thread->pid);
2801 } else if (proc->requested_threads == 0) {
2802 thread->looper |= BINDER_LOOPER_STATE_INVALID;
56b468fc 2803 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
355b0502
GKH
2804 proc->pid, thread->pid);
2805 } else {
2806 proc->requested_threads--;
2807 proc->requested_threads_started++;
2808 }
2809 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
2810 break;
2811 case BC_ENTER_LOOPER:
2812 binder_debug(BINDER_DEBUG_THREADS,
56b468fc 2813 "%d:%d BC_ENTER_LOOPER\n",
355b0502
GKH
2814 proc->pid, thread->pid);
2815 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
2816 thread->looper |= BINDER_LOOPER_STATE_INVALID;
56b468fc 2817 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
355b0502
GKH
2818 proc->pid, thread->pid);
2819 }
2820 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
2821 break;
2822 case BC_EXIT_LOOPER:
2823 binder_debug(BINDER_DEBUG_THREADS,
56b468fc 2824 "%d:%d BC_EXIT_LOOPER\n",
355b0502
GKH
2825 proc->pid, thread->pid);
2826 thread->looper |= BINDER_LOOPER_STATE_EXITED;
2827 break;
2828
2829 case BC_REQUEST_DEATH_NOTIFICATION:
2830 case BC_CLEAR_DEATH_NOTIFICATION: {
2831 uint32_t target;
da49889d 2832 binder_uintptr_t cookie;
355b0502
GKH
2833 struct binder_ref *ref;
2834 struct binder_ref_death *death;
2835
2836 if (get_user(target, (uint32_t __user *)ptr))
2837 return -EFAULT;
2838 ptr += sizeof(uint32_t);
da49889d 2839 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
355b0502 2840 return -EFAULT;
da49889d 2841 ptr += sizeof(binder_uintptr_t);
b81f4c5f 2842 ref = binder_get_ref(proc, target, false);
355b0502 2843 if (ref == NULL) {
56b468fc 2844 binder_user_error("%d:%d %s invalid ref %d\n",
355b0502
GKH
2845 proc->pid, thread->pid,
2846 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2847 "BC_REQUEST_DEATH_NOTIFICATION" :
2848 "BC_CLEAR_DEATH_NOTIFICATION",
2849 target);
2850 break;
2851 }
2852
2853 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
da49889d 2854 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
355b0502
GKH
2855 proc->pid, thread->pid,
2856 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
2857 "BC_REQUEST_DEATH_NOTIFICATION" :
2858 "BC_CLEAR_DEATH_NOTIFICATION",
f7d87412
TK
2859 (u64)cookie, ref->data.debug_id,
2860 ref->data.desc, ref->data.strong,
2861 ref->data.weak, ref->node->debug_id);
355b0502
GKH
2862
2863 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
2864 if (ref->death) {
56b468fc 2865 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
355b0502
GKH
2866 proc->pid, thread->pid);
2867 break;
2868 }
2869 death = kzalloc(sizeof(*death), GFP_KERNEL);
2870 if (death == NULL) {
3a822b33
TK
2871 WARN_ON(thread->return_error.cmd !=
2872 BR_OK);
2873 thread->return_error.cmd = BR_ERROR;
2874 list_add_tail(
2875 &thread->return_error.work.entry,
2876 &thread->todo);
355b0502 2877 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
56b468fc 2878 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
355b0502
GKH
2879 proc->pid, thread->pid);
2880 break;
2881 }
2882 binder_stats_created(BINDER_STAT_DEATH);
2883 INIT_LIST_HEAD(&death->work.entry);
2884 death->cookie = cookie;
2885 ref->death = death;
2886 if (ref->node->proc == NULL) {
2887 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
2888 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2889 list_add_tail(&ref->death->work.entry, &thread->todo);
2890 } else {
2891 list_add_tail(&ref->death->work.entry, &proc->todo);
2892 wake_up_interruptible(&proc->wait);
2893 }
2894 }
2895 } else {
2896 if (ref->death == NULL) {
56b468fc 2897 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
355b0502
GKH
2898 proc->pid, thread->pid);
2899 break;
2900 }
2901 death = ref->death;
2902 if (death->cookie != cookie) {
da49889d 2903 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
355b0502 2904 proc->pid, thread->pid,
da49889d
AH
2905 (u64)death->cookie,
2906 (u64)cookie);
355b0502
GKH
2907 break;
2908 }
2909 ref->death = NULL;
2910 if (list_empty(&death->work.entry)) {
2911 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2912 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2913 list_add_tail(&death->work.entry, &thread->todo);
2914 } else {
2915 list_add_tail(&death->work.entry, &proc->todo);
2916 wake_up_interruptible(&proc->wait);
2917 }
2918 } else {
2919 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
2920 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
2921 }
2922 }
2923 } break;
2924 case BC_DEAD_BINDER_DONE: {
2925 struct binder_work *w;
da49889d 2926 binder_uintptr_t cookie;
355b0502 2927 struct binder_ref_death *death = NULL;
10f62861 2928
da49889d 2929 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
355b0502
GKH
2930 return -EFAULT;
2931
3e908446 2932 ptr += sizeof(cookie);
355b0502
GKH
2933 list_for_each_entry(w, &proc->delivered_death, entry) {
2934 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
10f62861 2935
355b0502
GKH
2936 if (tmp_death->cookie == cookie) {
2937 death = tmp_death;
2938 break;
2939 }
2940 }
2941 binder_debug(BINDER_DEBUG_DEAD_BINDER,
da49889d
AH
2942 "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
2943 proc->pid, thread->pid, (u64)cookie,
2944 death);
355b0502 2945 if (death == NULL) {
da49889d
AH
2946 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
2947 proc->pid, thread->pid, (u64)cookie);
355b0502
GKH
2948 break;
2949 }
2950
2951 list_del_init(&death->work.entry);
2952 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
2953 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
2954 if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
2955 list_add_tail(&death->work.entry, &thread->todo);
2956 } else {
2957 list_add_tail(&death->work.entry, &proc->todo);
2958 wake_up_interruptible(&proc->wait);
2959 }
2960 }
2961 } break;
2962
2963 default:
56b468fc 2964 pr_err("%d:%d unknown command %d\n",
355b0502
GKH
2965 proc->pid, thread->pid, cmd);
2966 return -EINVAL;
2967 }
2968 *consumed = ptr - buffer;
2969 }
2970 return 0;
2971}
2972
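
Of the commands handled above, BC_FREE_BUFFER is the one every client must eventually issue for each received transaction; a minimal user-space sketch (same caveats as the earlier sketches; buffer is the tr.data.ptr.buffer value reported by BR_TRANSACTION or BR_REPLY):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int free_buffer(int fd, binder_uintptr_t buffer)
{
	struct {
		uint32_t cmd;
		binder_uintptr_t ptr;
	} __attribute__((packed)) writebuf = {
		.cmd = BC_FREE_BUFFER,
		.ptr = buffer,
	};
	struct binder_write_read bwr = {
		.write_size = sizeof(writebuf),
		.write_buffer = (uintptr_t)&writebuf,
	};

	return ioctl(fd, BINDER_WRITE_READ, &bwr);
}
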
fb07ebc3
BP
2973static void binder_stat_br(struct binder_proc *proc,
2974 struct binder_thread *thread, uint32_t cmd)
355b0502 2975{
975a1ac9 2976 trace_binder_return(cmd);
ec49bb00 2977 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
f716ecfc
BJS
2978 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
2979 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
2980 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
355b0502
GKH
2981 }
2982}
2983
2984static int binder_has_proc_work(struct binder_proc *proc,
2985 struct binder_thread *thread)
2986{
afda44d0 2987 return !list_empty(&proc->todo) || thread->looper_need_return;
355b0502
GKH
2988}
2989
2990static int binder_has_thread_work(struct binder_thread *thread)
2991{
3a822b33 2992 return !list_empty(&thread->todo) || thread->looper_need_return;
355b0502
GKH
2993}
2994
9b9340c5
TK
2995static int binder_put_node_cmd(struct binder_proc *proc,
2996 struct binder_thread *thread,
2997 void __user **ptrp,
2998 binder_uintptr_t node_ptr,
2999 binder_uintptr_t node_cookie,
3000 int node_debug_id,
3001 uint32_t cmd, const char *cmd_name)
3002{
3003 void __user *ptr = *ptrp;
3004
3005 if (put_user(cmd, (uint32_t __user *)ptr))
3006 return -EFAULT;
3007 ptr += sizeof(uint32_t);
3008
3009 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3010 return -EFAULT;
3011 ptr += sizeof(binder_uintptr_t);
3012
3013 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3014 return -EFAULT;
3015 ptr += sizeof(binder_uintptr_t);
3016
3017 binder_stat_br(proc, thread, cmd);
3018 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3019 proc->pid, thread->pid, cmd_name, node_debug_id,
3020 (u64)node_ptr, (u64)node_cookie);
3021
3022 *ptrp = ptr;
3023 return 0;
3024}
3025
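
binder_put_node_cmd() emits one of four node commands, two of which user space must acknowledge with the matching *_DONE command carrying the same ptr/cookie pair; that acknowledgement is what the BC_INCREFS_DONE/BC_ACQUIRE_DONE handler earlier in binder_thread_write() matches against pending_strong_ref/pending_weak_ref. A sketch of the wire format and the mapping (editorial illustration):

#include <stdint.h>
#include <linux/android/binder.h>

struct node_cmd {	/* layout written by binder_put_node_cmd() */
	uint32_t cmd;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
} __attribute__((packed));

/* Return the BC_* acknowledgement for a BR_* node command, or 0 when
 * none is required (BR_RELEASE and BR_DECREFS have no reply). */
static uint32_t node_cmd_ack(const struct node_cmd *in)
{
	switch (in->cmd) {
	case BR_INCREFS:
		return BC_INCREFS_DONE;
	case BR_ACQUIRE:
		return BC_ACQUIRE_DONE;
	default:
		return 0;
	}
}
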
355b0502
GKH
3026static int binder_thread_read(struct binder_proc *proc,
3027 struct binder_thread *thread,
da49889d
AH
3028 binder_uintptr_t binder_buffer, size_t size,
3029 binder_size_t *consumed, int non_block)
355b0502 3030{
da49889d 3031 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
355b0502
GKH
3032 void __user *ptr = buffer + *consumed;
3033 void __user *end = buffer + size;
3034
3035 int ret = 0;
3036 int wait_for_proc_work;
3037
3038 if (*consumed == 0) {
3039 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3040 return -EFAULT;
3041 ptr += sizeof(uint32_t);
3042 }
3043
3044retry:
3045 wait_for_proc_work = thread->transaction_stack == NULL &&
3046 list_empty(&thread->todo);
3047
355b0502
GKH
3048 thread->looper |= BINDER_LOOPER_STATE_WAITING;
3049 if (wait_for_proc_work)
3050 proc->ready_threads++;
975a1ac9 3051
ec49bb00 3052 binder_unlock(__func__);
975a1ac9
AH
3053
3054 trace_binder_wait_for_work(wait_for_proc_work,
3055 !!thread->transaction_stack,
3056 !list_empty(&thread->todo));
355b0502
GKH
3057 if (wait_for_proc_work) {
3058 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3059 BINDER_LOOPER_STATE_ENTERED))) {
56b468fc 3060 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
355b0502
GKH
3061 proc->pid, thread->pid, thread->looper);
3062 wait_event_interruptible(binder_user_error_wait,
3063 binder_stop_on_user_error < 2);
3064 }
3065 binder_set_nice(proc->default_priority);
3066 if (non_block) {
3067 if (!binder_has_proc_work(proc, thread))
3068 ret = -EAGAIN;
3069 } else
e2610b26 3070 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
355b0502
GKH
3071 } else {
3072 if (non_block) {
3073 if (!binder_has_thread_work(thread))
3074 ret = -EAGAIN;
3075 } else
e2610b26 3076 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
355b0502 3077 }
975a1ac9 3078
ec49bb00 3079 binder_lock(__func__);
975a1ac9 3080
355b0502
GKH
3081 if (wait_for_proc_work)
3082 proc->ready_threads--;
3083 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3084
3085 if (ret)
3086 return ret;
3087
3088 while (1) {
3089 uint32_t cmd;
3090 struct binder_transaction_data tr;
3091 struct binder_work *w;
3092 struct binder_transaction *t = NULL;
e482ec39 3093 struct binder_thread *t_from;
355b0502 3094
395262a9
DV
3095 if (!list_empty(&thread->todo)) {
3096 w = list_first_entry(&thread->todo, struct binder_work,
3097 entry);
3098 } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
3099 w = list_first_entry(&proc->todo, struct binder_work,
3100 entry);
3101 } else {
3102 /* no data added */
afda44d0 3103 if (ptr - buffer == 4 && !thread->looper_need_return)
355b0502
GKH
3104 goto retry;
3105 break;
3106 }
3107
3108 if (end - ptr < sizeof(tr) + 4)
3109 break;
3110
3111 switch (w->type) {
3112 case BINDER_WORK_TRANSACTION: {
3113 t = container_of(w, struct binder_transaction, work);
3114 } break;
3a822b33
TK
3115 case BINDER_WORK_RETURN_ERROR: {
3116 struct binder_error *e = container_of(
3117 w, struct binder_error, work);
3118
3119 WARN_ON(e->cmd == BR_OK);
 3120			if (put_user(e->cmd, (uint32_t __user *)ptr))
 3121				return -EFAULT;
 3122			ptr += sizeof(uint32_t);
 3123			/* stat e->cmd before it is reset; "cmd" is not initialized here */
 3124			binder_stat_br(proc, thread, e->cmd);
 3125			e->cmd = BR_OK;
3126 list_del(&w->entry);
3127 } break;
355b0502
GKH
3128 case BINDER_WORK_TRANSACTION_COMPLETE: {
3129 cmd = BR_TRANSACTION_COMPLETE;
3130 if (put_user(cmd, (uint32_t __user *)ptr))
3131 return -EFAULT;
3132 ptr += sizeof(uint32_t);
3133
3134 binder_stat_br(proc, thread, cmd);
3135 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
56b468fc 3136 "%d:%d BR_TRANSACTION_COMPLETE\n",
355b0502
GKH
3137 proc->pid, thread->pid);
3138
3139 list_del(&w->entry);
3140 kfree(w);
3141 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3142 } break;
3143 case BINDER_WORK_NODE: {
3144 struct binder_node *node = container_of(w, struct binder_node, work);
9b9340c5
TK
3145 int strong, weak;
3146 binder_uintptr_t node_ptr = node->ptr;
3147 binder_uintptr_t node_cookie = node->cookie;
3148 int node_debug_id = node->debug_id;
3149 int has_weak_ref;
3150 int has_strong_ref;
3151 void __user *orig_ptr = ptr;
3152
3153 BUG_ON(proc != node->proc);
3154 strong = node->internal_strong_refs ||
3155 node->local_strong_refs;
3156 weak = !hlist_empty(&node->refs) ||
96dd75d9
TK
3157 node->local_weak_refs ||
3158 node->tmp_refs || strong;
9b9340c5
TK
3159 has_strong_ref = node->has_strong_ref;
3160 has_weak_ref = node->has_weak_ref;
3161
3162 if (weak && !has_weak_ref) {
355b0502
GKH
3163 node->has_weak_ref = 1;
3164 node->pending_weak_ref = 1;
3165 node->local_weak_refs++;
9b9340c5
TK
3166 }
3167 if (strong && !has_strong_ref) {
355b0502
GKH
3168 node->has_strong_ref = 1;
3169 node->pending_strong_ref = 1;
3170 node->local_strong_refs++;
9b9340c5
TK
3171 }
3172 if (!strong && has_strong_ref)
355b0502 3173 node->has_strong_ref = 0;
9b9340c5 3174 if (!weak && has_weak_ref)
355b0502 3175 node->has_weak_ref = 0;
9b9340c5
TK
3176 list_del(&w->entry);
3177
3178 if (!weak && !strong) {
3179 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3180 "%d:%d node %d u%016llx c%016llx deleted\n",
3181 proc->pid, thread->pid,
3182 node_debug_id,
3183 (u64)node_ptr,
3184 (u64)node_cookie);
3185 rb_erase(&node->rb_node, &proc->nodes);
3186 kfree(node);
3187 binder_stats_deleted(BINDER_STAT_NODE);
355b0502 3188 }
9b9340c5
TK
3189 if (weak && !has_weak_ref)
3190 ret = binder_put_node_cmd(
3191 proc, thread, &ptr, node_ptr,
3192 node_cookie, node_debug_id,
3193 BR_INCREFS, "BR_INCREFS");
3194 if (!ret && strong && !has_strong_ref)
3195 ret = binder_put_node_cmd(
3196 proc, thread, &ptr, node_ptr,
3197 node_cookie, node_debug_id,
3198 BR_ACQUIRE, "BR_ACQUIRE");
3199 if (!ret && !strong && has_strong_ref)
3200 ret = binder_put_node_cmd(
3201 proc, thread, &ptr, node_ptr,
3202 node_cookie, node_debug_id,
3203 BR_RELEASE, "BR_RELEASE");
3204 if (!ret && !weak && has_weak_ref)
3205 ret = binder_put_node_cmd(
3206 proc, thread, &ptr, node_ptr,
3207 node_cookie, node_debug_id,
3208 BR_DECREFS, "BR_DECREFS");
3209 if (orig_ptr == ptr)
3210 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3211 "%d:%d node %d u%016llx c%016llx state unchanged\n",
3212 proc->pid, thread->pid,
3213 node_debug_id,
3214 (u64)node_ptr,
3215 (u64)node_cookie);
3216 if (ret)
3217 return ret;
355b0502
GKH
3218 } break;
3219 case BINDER_WORK_DEAD_BINDER:
3220 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3221 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
3222 struct binder_ref_death *death;
3223 uint32_t cmd;
3224
3225 death = container_of(w, struct binder_ref_death, work);
3226 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
3227 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
3228 else
3229 cmd = BR_DEAD_BINDER;
3230 if (put_user(cmd, (uint32_t __user *)ptr))
3231 return -EFAULT;
3232 ptr += sizeof(uint32_t);
da49889d
AH
3233 if (put_user(death->cookie,
3234 (binder_uintptr_t __user *)ptr))
355b0502 3235 return -EFAULT;
da49889d 3236 ptr += sizeof(binder_uintptr_t);
89334ab4 3237 binder_stat_br(proc, thread, cmd);
355b0502 3238 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
da49889d 3239 "%d:%d %s %016llx\n",
355b0502
GKH
3240 proc->pid, thread->pid,
3241 cmd == BR_DEAD_BINDER ?
3242 "BR_DEAD_BINDER" :
3243 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
da49889d 3244 (u64)death->cookie);
355b0502
GKH
3245
3246 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
3247 list_del(&w->entry);
3248 kfree(death);
3249 binder_stats_deleted(BINDER_STAT_DEATH);
3250 } else
3251 list_move(&w->entry, &proc->delivered_death);
3252 if (cmd == BR_DEAD_BINDER)
3253 goto done; /* DEAD_BINDER notifications can cause transactions */
3254 } break;
3255 }
3256
3257 if (!t)
3258 continue;
3259
3260 BUG_ON(t->buffer == NULL);
3261 if (t->buffer->target_node) {
3262 struct binder_node *target_node = t->buffer->target_node;
10f62861 3263
355b0502
GKH
3264 tr.target.ptr = target_node->ptr;
3265 tr.cookie = target_node->cookie;
3266 t->saved_priority = task_nice(current);
3267 if (t->priority < target_node->min_priority &&
3268 !(t->flags & TF_ONE_WAY))
3269 binder_set_nice(t->priority);
3270 else if (!(t->flags & TF_ONE_WAY) ||
3271 t->saved_priority > target_node->min_priority)
3272 binder_set_nice(target_node->min_priority);
3273 cmd = BR_TRANSACTION;
3274 } else {
da49889d
AH
3275 tr.target.ptr = 0;
3276 tr.cookie = 0;
355b0502
GKH
3277 cmd = BR_REPLY;
3278 }
3279 tr.code = t->code;
3280 tr.flags = t->flags;
4a2ebb93 3281 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
355b0502 3282
e482ec39
TK
3283 t_from = binder_get_txn_from(t);
3284 if (t_from) {
3285 struct task_struct *sender = t_from->proc->tsk;
10f62861 3286
355b0502 3287 tr.sender_pid = task_tgid_nr_ns(sender,
17cf22c3 3288 task_active_pid_ns(current));
355b0502
GKH
3289 } else {
3290 tr.sender_pid = 0;
3291 }
3292
3293 tr.data_size = t->buffer->data_size;
3294 tr.offsets_size = t->buffer->offsets_size;
467545d8
TK
3295 tr.data.ptr.buffer = (binder_uintptr_t)
3296 ((uintptr_t)t->buffer->data +
3297 binder_alloc_get_user_buffer_offset(&proc->alloc));
355b0502
GKH
3298 tr.data.ptr.offsets = tr.data.ptr.buffer +
3299 ALIGN(t->buffer->data_size,
3300 sizeof(void *));
3301
e482ec39
TK
3302 if (put_user(cmd, (uint32_t __user *)ptr)) {
3303 if (t_from)
3304 binder_thread_dec_tmpref(t_from);
355b0502 3305 return -EFAULT;
e482ec39 3306 }
355b0502 3307 ptr += sizeof(uint32_t);
e482ec39
TK
3308 if (copy_to_user(ptr, &tr, sizeof(tr))) {
3309 if (t_from)
3310 binder_thread_dec_tmpref(t_from);
355b0502 3311 return -EFAULT;
e482ec39 3312 }
355b0502
GKH
3313 ptr += sizeof(tr);
3314
975a1ac9 3315 trace_binder_transaction_received(t);
355b0502
GKH
3316 binder_stat_br(proc, thread, cmd);
3317 binder_debug(BINDER_DEBUG_TRANSACTION,
da49889d 3318 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
355b0502
GKH
3319 proc->pid, thread->pid,
3320 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
3321 "BR_REPLY",
e482ec39
TK
3322 t->debug_id, t_from ? t_from->proc->pid : 0,
3323 t_from ? t_from->pid : 0, cmd,
355b0502 3324 t->buffer->data_size, t->buffer->offsets_size,
da49889d 3325 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
355b0502 3326
e482ec39
TK
3327 if (t_from)
3328 binder_thread_dec_tmpref(t_from);
355b0502
GKH
3329 list_del(&t->work.entry);
3330 t->buffer->allow_user_free = 1;
3331 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
3332 t->to_parent = thread->transaction_stack;
3333 t->to_thread = thread;
3334 thread->transaction_stack = t;
3335 } else {
16273538 3336 binder_free_transaction(t);
355b0502
GKH
3337 }
3338 break;
3339 }
3340
3341done:
3342
3343 *consumed = ptr - buffer;
3344 if (proc->requested_threads + proc->ready_threads == 0 &&
3345 proc->requested_threads_started < proc->max_threads &&
3346 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3347 	     BINDER_LOOPER_STATE_ENTERED))
3348 	    /* user-space fails to spawn a new thread if we leave this out */) {
3349 proc->requested_threads++;
3350 binder_debug(BINDER_DEBUG_THREADS,
56b468fc 3351 "%d:%d BR_SPAWN_LOOPER\n",
355b0502
GKH
3352 proc->pid, thread->pid);
3353 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
3354 return -EFAULT;
89334ab4 3355 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
355b0502
GKH
3356 }
3357 return 0;
3358}
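
/*
 * Illustrative user-space side of the BR_SPAWN_LOOPER handshake above
 * (a sketch, not driver code; binder_fd, read_one_command() and
 * spawn_thread() are hypothetical helpers):
 *
 *	if (read_one_command(binder_fd) == BR_SPAWN_LOOPER)
 *		spawn_thread(looper_fn);
 *
 * where looper_fn() writes BC_REGISTER_LOOPER once and then services
 * the driver via BINDER_WRITE_READ; that BC_REGISTER_LOOPER is what
 * increments proc->requested_threads_started.
 */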
3359
3360static void binder_release_work(struct list_head *list)
3361{
3362 struct binder_work *w;
10f62861 3363
355b0502
GKH
3364 while (!list_empty(list)) {
3365 w = list_first_entry(list, struct binder_work, entry);
3366 list_del_init(&w->entry);
3367 switch (w->type) {
3368 case BINDER_WORK_TRANSACTION: {
3369 struct binder_transaction *t;
3370
3371 t = container_of(w, struct binder_transaction, work);
675d66b0
AH
3372 if (t->buffer->target_node &&
3373 !(t->flags & TF_ONE_WAY)) {
355b0502 3374 binder_send_failed_reply(t, BR_DEAD_REPLY);
675d66b0
AH
3375 } else {
3376 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
56b468fc 3377 "undelivered transaction %d\n",
675d66b0 3378 t->debug_id);
16273538 3379 binder_free_transaction(t);
675d66b0 3380 }
355b0502 3381 } break;
3a822b33
TK
3382 case BINDER_WORK_RETURN_ERROR: {
3383 struct binder_error *e = container_of(
3384 w, struct binder_error, work);
3385
3386 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
3387 "undelivered TRANSACTION_ERROR: %u\n",
3388 e->cmd);
3389 } break;
355b0502 3390 case BINDER_WORK_TRANSACTION_COMPLETE: {
675d66b0 3391 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
56b468fc 3392 "undelivered TRANSACTION_COMPLETE\n");
355b0502
GKH
3393 kfree(w);
3394 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3395 } break;
675d66b0
AH
3396 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3397 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
3398 struct binder_ref_death *death;
3399
3400 death = container_of(w, struct binder_ref_death, work);
3401 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
da49889d
AH
3402 "undelivered death notification, %016llx\n",
3403 (u64)death->cookie);
675d66b0
AH
3404 kfree(death);
3405 binder_stats_deleted(BINDER_STAT_DEATH);
3406 } break;
355b0502 3407 default:
56b468fc 3408 pr_err("unexpected work type, %d, not freed\n",
675d66b0 3409 w->type);
355b0502
GKH
3410 break;
3411 }
3412 }
3413
3414}
3415
3416static struct binder_thread *binder_get_thread(struct binder_proc *proc)
3417{
3418 struct binder_thread *thread = NULL;
3419 struct rb_node *parent = NULL;
3420 struct rb_node **p = &proc->threads.rb_node;
3421
3422 while (*p) {
3423 parent = *p;
3424 thread = rb_entry(parent, struct binder_thread, rb_node);
3425
3426 if (current->pid < thread->pid)
3427 p = &(*p)->rb_left;
3428 else if (current->pid > thread->pid)
3429 p = &(*p)->rb_right;
3430 else
3431 break;
3432 }
3433 if (*p == NULL) {
3434 thread = kzalloc(sizeof(*thread), GFP_KERNEL);
3435 if (thread == NULL)
3436 return NULL;
3437 binder_stats_created(BINDER_STAT_THREAD);
3438 thread->proc = proc;
3439 thread->pid = current->pid;
e482ec39 3440 atomic_set(&thread->tmp_ref, 0);
355b0502
GKH
3441 init_waitqueue_head(&thread->wait);
3442 INIT_LIST_HEAD(&thread->todo);
3443 rb_link_node(&thread->rb_node, parent, p);
3444 rb_insert_color(&thread->rb_node, &proc->threads);
afda44d0 3445 thread->looper_need_return = true;
3a822b33
TK
3446 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
3447 thread->return_error.cmd = BR_OK;
3448 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
3449 thread->reply_error.cmd = BR_OK;
355b0502
GKH
3450 }
3451 return thread;
3452}
3453
e482ec39
TK
3454static void binder_free_proc(struct binder_proc *proc)
3455{
3456 BUG_ON(!list_empty(&proc->todo));
3457 BUG_ON(!list_empty(&proc->delivered_death));
3458 binder_alloc_deferred_release(&proc->alloc);
3459 put_task_struct(proc->tsk);
3460 binder_stats_deleted(BINDER_STAT_PROC);
3461 kfree(proc);
3462}
3463
3464static void binder_free_thread(struct binder_thread *thread)
3465{
3466 BUG_ON(!list_empty(&thread->todo));
3467 binder_stats_deleted(BINDER_STAT_THREAD);
3468 binder_proc_dec_tmpref(thread->proc);
3469 kfree(thread);
3470}
3471
3472static int binder_thread_release(struct binder_proc *proc,
3473 struct binder_thread *thread)
355b0502
GKH
3474{
3475 struct binder_transaction *t;
3476 struct binder_transaction *send_reply = NULL;
3477 int active_transactions = 0;
e482ec39 3478 struct binder_transaction *last_t = NULL;
355b0502 3479
e482ec39
TK
3480 /*
3481 * take a ref on the proc so it survives
3482 * after we remove this thread from proc->threads.
3483 	 * The corresponding decrement happens when we
3484 	 * actually free the thread in binder_free_thread().
3485 */
3486 proc->tmp_ref++;
3487 /*
3488 * take a ref on this thread to ensure it
3489 * survives while we are releasing it
3490 */
3491 atomic_inc(&thread->tmp_ref);
355b0502
GKH
3492 rb_erase(&thread->rb_node, &proc->threads);
3493 t = thread->transaction_stack;
e482ec39
TK
3494 if (t) {
3495 spin_lock(&t->lock);
3496 if (t->to_thread == thread)
3497 send_reply = t;
3498 }
3499 thread->is_dead = true;
3500
355b0502 3501 while (t) {
e482ec39 3502 last_t = t;
355b0502
GKH
3503 active_transactions++;
3504 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
56b468fc
AS
3505 "release %d:%d transaction %d %s, still active\n",
3506 proc->pid, thread->pid,
355b0502
GKH
3507 t->debug_id,
3508 (t->to_thread == thread) ? "in" : "out");
3509
3510 if (t->to_thread == thread) {
3511 t->to_proc = NULL;
3512 t->to_thread = NULL;
3513 if (t->buffer) {
3514 t->buffer->transaction = NULL;
3515 t->buffer = NULL;
3516 }
3517 t = t->to_parent;
3518 } else if (t->from == thread) {
3519 t->from = NULL;
3520 t = t->from_parent;
3521 } else
3522 BUG();
e482ec39
TK
3523 spin_unlock(&last_t->lock);
3524 if (t)
3525 spin_lock(&t->lock);
355b0502 3526 }
e482ec39 3527
355b0502
GKH
3528 if (send_reply)
3529 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
3530 binder_release_work(&thread->todo);
e482ec39 3531 binder_thread_dec_tmpref(thread);
355b0502
GKH
3532 return active_transactions;
3533}
3534
3535static unsigned int binder_poll(struct file *filp,
3536 struct poll_table_struct *wait)
3537{
3538 struct binder_proc *proc = filp->private_data;
3539 struct binder_thread *thread = NULL;
3540 int wait_for_proc_work;
3541
ec49bb00 3542 binder_lock(__func__);
975a1ac9 3543
355b0502
GKH
3544 	thread = binder_get_thread(proc);
	if (!thread) {
		/* binder_get_thread() can fail under memory pressure */
		binder_unlock(__func__);
		return POLLERR;
	}
3545
3546 wait_for_proc_work = thread->transaction_stack == NULL &&
3a822b33 3547 list_empty(&thread->todo);
975a1ac9 3548
ec49bb00 3549 binder_unlock(__func__);
355b0502
GKH
3550
3551 if (wait_for_proc_work) {
3552 if (binder_has_proc_work(proc, thread))
3553 return POLLIN;
3554 poll_wait(filp, &proc->wait, wait);
3555 if (binder_has_proc_work(proc, thread))
3556 return POLLIN;
3557 } else {
3558 if (binder_has_thread_work(thread))
3559 return POLLIN;
3560 poll_wait(filp, &thread->wait, wait);
3561 if (binder_has_thread_work(thread))
3562 return POLLIN;
3563 }
3564 return 0;
3565}
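
/*
 * User space can multiplex binder with other fds in the usual way:
 * POLLIN here means a BINDER_WRITE_READ with a nonzero read_size would
 * not block. A sketch (binder_fd and drain_commands() are hypothetical):
 *
 *	struct pollfd pfd = { .fd = binder_fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		drain_commands(binder_fd);
 */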
3566
78260ac6
TR
3567static int binder_ioctl_write_read(struct file *filp,
3568 unsigned int cmd, unsigned long arg,
3569 struct binder_thread *thread)
3570{
3571 int ret = 0;
3572 struct binder_proc *proc = filp->private_data;
3573 unsigned int size = _IOC_SIZE(cmd);
3574 void __user *ubuf = (void __user *)arg;
3575 struct binder_write_read bwr;
3576
3577 if (size != sizeof(struct binder_write_read)) {
3578 ret = -EINVAL;
3579 goto out;
3580 }
3581 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
3582 ret = -EFAULT;
3583 goto out;
3584 }
3585 binder_debug(BINDER_DEBUG_READ_WRITE,
3586 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
3587 proc->pid, thread->pid,
3588 (u64)bwr.write_size, (u64)bwr.write_buffer,
3589 (u64)bwr.read_size, (u64)bwr.read_buffer);
3590
3591 if (bwr.write_size > 0) {
3592 ret = binder_thread_write(proc, thread,
3593 bwr.write_buffer,
3594 bwr.write_size,
3595 &bwr.write_consumed);
3596 trace_binder_write_done(ret);
3597 if (ret < 0) {
3598 bwr.read_consumed = 0;
3599 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
3600 ret = -EFAULT;
3601 goto out;
3602 }
3603 }
3604 if (bwr.read_size > 0) {
3605 ret = binder_thread_read(proc, thread, bwr.read_buffer,
3606 bwr.read_size,
3607 &bwr.read_consumed,
3608 filp->f_flags & O_NONBLOCK);
3609 trace_binder_read_done(ret);
3610 if (!list_empty(&proc->todo))
3611 wake_up_interruptible(&proc->wait);
3612 if (ret < 0) {
3613 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
3614 ret = -EFAULT;
3615 goto out;
3616 }
3617 }
3618 binder_debug(BINDER_DEBUG_READ_WRITE,
3619 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
3620 proc->pid, thread->pid,
3621 (u64)bwr.write_consumed, (u64)bwr.write_size,
3622 (u64)bwr.read_consumed, (u64)bwr.read_size);
3623 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
3624 ret = -EFAULT;
3625 goto out;
3626 }
3627out:
3628 return ret;
3629}
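
/*
 * The matching user-space call is a single ioctl that both queues
 * commands and pulls returns (a sketch; binder_fd and the buffers are
 * hypothetical):
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)write_buf,
 *		.write_size = write_len,
 *		.read_buffer = (binder_uintptr_t)read_buf,
 *		.read_size = sizeof(read_buf),
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *	// bwr.write_consumed / bwr.read_consumed report progress,
 *	// including on error, exactly as maintained above.
 */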
3630
3631static int binder_ioctl_set_ctx_mgr(struct file *filp)
3632{
3633 int ret = 0;
3634 struct binder_proc *proc = filp->private_data;
803df563 3635 struct binder_context *context = proc->context;
3490fdcb 3636 struct binder_node *new_node;
78260ac6
TR
3637 kuid_t curr_euid = current_euid();
3638
3490fdcb 3639 mutex_lock(&context->context_mgr_node_lock);
803df563 3640 if (context->binder_context_mgr_node) {
78260ac6
TR
3641 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
3642 ret = -EBUSY;
3643 goto out;
3644 }
79af7307
SS
3645 ret = security_binder_set_context_mgr(proc->tsk);
3646 if (ret < 0)
3647 goto out;
803df563
MC
3648 if (uid_valid(context->binder_context_mgr_uid)) {
3649 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
78260ac6
TR
3650 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
3651 from_kuid(&init_user_ns, curr_euid),
3652 from_kuid(&init_user_ns,
803df563 3653 context->binder_context_mgr_uid));
78260ac6
TR
3654 ret = -EPERM;
3655 goto out;
3656 }
3657 } else {
803df563 3658 context->binder_context_mgr_uid = curr_euid;
78260ac6 3659 }
3490fdcb
TK
3660 new_node = binder_new_node(proc, 0, 0);
3661 if (!new_node) {
78260ac6
TR
3662 ret = -ENOMEM;
3663 goto out;
3664 }
3490fdcb
TK
3665 new_node->local_weak_refs++;
3666 new_node->local_strong_refs++;
3667 new_node->has_strong_ref = 1;
3668 new_node->has_weak_ref = 1;
3669 context->binder_context_mgr_node = new_node;
96dd75d9 3670 binder_put_node(new_node);
78260ac6 3671out:
3490fdcb 3672 mutex_unlock(&context->context_mgr_node_lock);
78260ac6
TR
3673 return ret;
3674}
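
/*
 * Only one node per context can be the context manager; on Android it
 * is the servicemanager that claims it, roughly (illustrative;
 * binder_fd is hypothetical):
 *
 *	ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0);
 *
 * after which handle 0 in every other process refers to this node.
 */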
3675
355b0502
GKH
3676static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3677{
3678 int ret;
3679 struct binder_proc *proc = filp->private_data;
3680 struct binder_thread *thread;
3681 unsigned int size = _IOC_SIZE(cmd);
3682 void __user *ubuf = (void __user *)arg;
3683
78260ac6
TR
3684 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
3685 proc->pid, current->pid, cmd, arg);*/
355b0502 3686
975a1ac9
AH
3687 trace_binder_ioctl(cmd, arg);
3688
355b0502
GKH
3689 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
3690 if (ret)
975a1ac9 3691 goto err_unlocked;
355b0502 3692
ec49bb00 3693 binder_lock(__func__);
355b0502
GKH
3694 thread = binder_get_thread(proc);
3695 if (thread == NULL) {
3696 ret = -ENOMEM;
3697 goto err;
3698 }
3699
3700 switch (cmd) {
78260ac6
TR
3701 case BINDER_WRITE_READ:
3702 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
3703 if (ret)
355b0502 3704 goto err;
355b0502 3705 break;
355b0502
GKH
3706 case BINDER_SET_MAX_THREADS:
3707 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
3708 ret = -EINVAL;
3709 goto err;
3710 }
3711 break;
3712 case BINDER_SET_CONTEXT_MGR:
78260ac6
TR
3713 ret = binder_ioctl_set_ctx_mgr(filp);
3714 if (ret)
355b0502 3715 goto err;
355b0502
GKH
3716 break;
3717 case BINDER_THREAD_EXIT:
56b468fc 3718 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
355b0502 3719 proc->pid, thread->pid);
e482ec39 3720 binder_thread_release(proc, thread);
355b0502
GKH
3721 thread = NULL;
3722 break;
36c89c0a
MM
3723 case BINDER_VERSION: {
3724 struct binder_version __user *ver = ubuf;
3725
355b0502
GKH
3726 if (size != sizeof(struct binder_version)) {
3727 ret = -EINVAL;
3728 goto err;
3729 }
36c89c0a
MM
3730 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
3731 &ver->protocol_version)) {
355b0502
GKH
3732 ret = -EINVAL;
3733 goto err;
3734 }
3735 break;
36c89c0a 3736 }
355b0502
GKH
3737 default:
3738 ret = -EINVAL;
3739 goto err;
3740 }
3741 ret = 0;
3742err:
3743 if (thread)
afda44d0 3744 thread->looper_need_return = false;
ec49bb00 3745 binder_unlock(__func__);
355b0502
GKH
3746 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
3747 if (ret && ret != -ERESTARTSYS)
56b468fc 3748 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
975a1ac9
AH
3749err_unlocked:
3750 trace_binder_ioctl_done(ret);
355b0502
GKH
3751 return ret;
3752}
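
/*
 * A typical open-time ioctl sequence from user space (a sketch;
 * binder_fd is hypothetical):
 *
 *	struct binder_version vers;
 *	uint32_t max_threads = 15;
 *
 *	ioctl(binder_fd, BINDER_VERSION, &vers);
 *	if (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		return -1;	// mismatched kernel/user-space headers
 *	ioctl(binder_fd, BINDER_SET_MAX_THREADS, &max_threads);
 */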
3753
3754static void binder_vma_open(struct vm_area_struct *vma)
3755{
3756 struct binder_proc *proc = vma->vm_private_data;
10f62861 3757
355b0502 3758 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
56b468fc 3759 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
355b0502
GKH
3760 proc->pid, vma->vm_start, vma->vm_end,
3761 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
3762 (unsigned long)pgprot_val(vma->vm_page_prot));
355b0502
GKH
3763}
3764
3765static void binder_vma_close(struct vm_area_struct *vma)
3766{
3767 struct binder_proc *proc = vma->vm_private_data;
10f62861 3768
355b0502 3769 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
56b468fc 3770 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
355b0502
GKH
3771 proc->pid, vma->vm_start, vma->vm_end,
3772 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
3773 (unsigned long)pgprot_val(vma->vm_page_prot));
467545d8 3774 binder_alloc_vma_close(&proc->alloc);
355b0502
GKH
3775 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
3776}
3777
ddac7d5f
VM
3778static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3779{
3780 return VM_FAULT_SIGBUS;
3781}
3782
7cbea8dc 3783static const struct vm_operations_struct binder_vm_ops = {
355b0502
GKH
3784 .open = binder_vma_open,
3785 .close = binder_vma_close,
ddac7d5f 3786 .fault = binder_vm_fault,
355b0502
GKH
3787};
3788
467545d8
TK
3789static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
3790{
3791 int ret;
3792 struct binder_proc *proc = filp->private_data;
3793 const char *failure_string;
3794
3795 if (proc->tsk != current->group_leader)
3796 return -EINVAL;
3797
3798 if ((vma->vm_end - vma->vm_start) > SZ_4M)
3799 vma->vm_end = vma->vm_start + SZ_4M;
3800
3801 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3802 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
3803 __func__, proc->pid, vma->vm_start, vma->vm_end,
3804 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
3805 (unsigned long)pgprot_val(vma->vm_page_prot));
3806
3807 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
3808 ret = -EPERM;
3809 failure_string = "bad vm_flags";
3810 goto err_bad_arg;
3811 }
3812 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
3813 vma->vm_ops = &binder_vm_ops;
3814 vma->vm_private_data = proc;
3815
3816 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
3817 if (ret)
3818 return ret;
3819 proc->files = get_files_struct(current);
3820 return 0;
3821
355b0502 3822err_bad_arg:
258767fe 3823 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
355b0502
GKH
3824 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
3825 return ret;
3826}
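
/*
 * User space maps the transaction buffer read-only and at most 4MB,
 * per the checks above (writable mappings are rejected and VM_MAYWRITE
 * is cleared); data only ever arrives through BINDER_WRITE_READ and is
 * read back through this mapping. A sketch (binder_fd and
 * BINDER_VM_SIZE are hypothetical):
 *
 *	void *map = mmap(NULL, BINDER_VM_SIZE, PROT_READ,
 *			 MAP_PRIVATE, binder_fd, 0);
 */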
3827
3828static int binder_open(struct inode *nodp, struct file *filp)
3829{
3830 struct binder_proc *proc;
04e3812e 3831 struct binder_device *binder_dev;
355b0502
GKH
3832
3833 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
3834 current->group_leader->pid, current->pid);
3835
3836 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
3837 if (proc == NULL)
3838 return -ENOMEM;
b0f59d6d
TK
3839 spin_lock_init(&proc->inner_lock);
3840 spin_lock_init(&proc->outer_lock);
35979513
MC
3841 get_task_struct(current->group_leader);
3842 proc->tsk = current->group_leader;
355b0502
GKH
3843 INIT_LIST_HEAD(&proc->todo);
3844 init_waitqueue_head(&proc->wait);
3845 proc->default_priority = task_nice(current);
04e3812e
MC
3846 binder_dev = container_of(filp->private_data, struct binder_device,
3847 miscdev);
3848 proc->context = &binder_dev->context;
467545d8 3849 binder_alloc_init(&proc->alloc);
975a1ac9 3850
ec49bb00 3851 binder_lock(__func__);
975a1ac9 3852
355b0502 3853 binder_stats_created(BINDER_STAT_PROC);
355b0502
GKH
3854 proc->pid = current->group_leader->pid;
3855 INIT_LIST_HEAD(&proc->delivered_death);
3856 filp->private_data = proc;
975a1ac9 3857
ec49bb00 3858 binder_unlock(__func__);
355b0502 3859
3490fdcb
TK
3860 mutex_lock(&binder_procs_lock);
3861 hlist_add_head(&proc->proc_node, &binder_procs);
3862 mutex_unlock(&binder_procs_lock);
3863
16b66554 3864 if (binder_debugfs_dir_entry_proc) {
355b0502 3865 char strbuf[11];
10f62861 3866
355b0502 3867 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
8b980bee
MC
3868 /*
3869 * proc debug entries are shared between contexts, so
3870 * this will fail if the process tries to open the driver
3871 		 * again with a different context. The printing code will
3872 		 * print all contexts for a given PID anyway, so this
3873 * is not a problem.
3874 */
16b66554 3875 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
8b980bee
MC
3876 binder_debugfs_dir_entry_proc,
3877 (void *)(unsigned long)proc->pid,
3878 &binder_proc_fops);
355b0502
GKH
3879 }
3880
3881 return 0;
3882}
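
/*
 * Reached via a plain open of one of the nodes registered in
 * init_binder_device() below, e.g. (illustrative):
 *
 *	int binder_fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *
 * filp->private_data initially points at the miscdevice, which is how
 * the container_of() above recovers the per-device binder_context.
 */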
3883
3884static int binder_flush(struct file *filp, fl_owner_t id)
3885{
3886 struct binder_proc *proc = filp->private_data;
3887
3888 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
3889
3890 return 0;
3891}
3892
3893static void binder_deferred_flush(struct binder_proc *proc)
3894{
3895 struct rb_node *n;
3896 int wake_count = 0;
10f62861 3897
355b0502
GKH
3898 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
3899 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
10f62861 3900
afda44d0 3901 thread->looper_need_return = true;
355b0502
GKH
3902 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
3903 wake_up_interruptible(&thread->wait);
3904 wake_count++;
3905 }
3906 }
3907 wake_up_interruptible_all(&proc->wait);
3908
3909 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
3910 "binder_flush: %d woke %d threads\n", proc->pid,
3911 wake_count);
3912}
3913
3914static int binder_release(struct inode *nodp, struct file *filp)
3915{
3916 struct binder_proc *proc = filp->private_data;
10f62861 3917
16b66554 3918 debugfs_remove(proc->debugfs_entry);
355b0502
GKH
3919 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
3920
3921 return 0;
3922}
3923
008fa749
ME
3924static int binder_node_release(struct binder_node *node, int refs)
3925{
3926 struct binder_ref *ref;
3927 int death = 0;
3928
3929 list_del_init(&node->work.entry);
3930 binder_release_work(&node->async_todo);
96dd75d9
TK
3931 /*
3932 	 * The caller must have taken a temporary ref on the node.
3933 */
3934 BUG_ON(!node->tmp_refs);
3935 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
008fa749
ME
3936 kfree(node);
3937 binder_stats_deleted(BINDER_STAT_NODE);
3938
3939 return refs;
3940 }
3941
3942 node->proc = NULL;
3943 node->local_strong_refs = 0;
3944 node->local_weak_refs = 0;
3490fdcb
TK
3945
3946 spin_lock(&binder_dead_nodes_lock);
ec49bb00 3947 hlist_add_head(&node->dead_node, &binder_dead_nodes);
3490fdcb 3948 spin_unlock(&binder_dead_nodes_lock);
008fa749
ME
3949
3950 hlist_for_each_entry(ref, &node->refs, node_entry) {
3951 refs++;
3952
3953 if (!ref->death)
e194fd8a 3954 continue;
008fa749
ME
3955
3956 death++;
3957
3958 if (list_empty(&ref->death->work.entry)) {
3959 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3960 list_add_tail(&ref->death->work.entry,
3961 &ref->proc->todo);
3962 wake_up_interruptible(&ref->proc->wait);
3963 } else
3964 BUG();
3965 }
3966
008fa749
ME
3967 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3968 "node %d now dead, refs %d, death %d\n",
3969 node->debug_id, refs, death);
96dd75d9 3970 binder_put_node(node);
008fa749
ME
3971
3972 return refs;
3973}
3974
355b0502
GKH
3975static void binder_deferred_release(struct binder_proc *proc)
3976{
803df563 3977 struct binder_context *context = proc->context;
355b0502 3978 struct rb_node *n;
467545d8 3979 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
355b0502 3980
355b0502
GKH
3981 BUG_ON(proc->files);
3982
3490fdcb 3983 mutex_lock(&binder_procs_lock);
355b0502 3984 hlist_del(&proc->proc_node);
3490fdcb 3985 mutex_unlock(&binder_procs_lock);
53413e7d 3986
3490fdcb 3987 mutex_lock(&context->context_mgr_node_lock);
803df563
MC
3988 if (context->binder_context_mgr_node &&
3989 context->binder_context_mgr_node->proc == proc) {
355b0502 3990 binder_debug(BINDER_DEBUG_DEAD_BINDER,
c07c933f
ME
3991 "%s: %d context_mgr_node gone\n",
3992 __func__, proc->pid);
803df563 3993 context->binder_context_mgr_node = NULL;
355b0502 3994 }
3490fdcb 3995 mutex_unlock(&context->context_mgr_node_lock);
e482ec39
TK
3996 /*
3997 * Make sure proc stays alive after we
3998 * remove all the threads
3999 */
4000 proc->tmp_ref++;
355b0502 4001
e482ec39 4002 proc->is_dead = true;
355b0502
GKH
4003 threads = 0;
4004 active_transactions = 0;
4005 while ((n = rb_first(&proc->threads))) {
53413e7d
ME
4006 struct binder_thread *thread;
4007
4008 thread = rb_entry(n, struct binder_thread, rb_node);
355b0502 4009 threads++;
e482ec39 4010 active_transactions += binder_thread_release(proc, thread);
355b0502 4011 }
53413e7d 4012
355b0502
GKH
4013 nodes = 0;
4014 incoming_refs = 0;
4015 while ((n = rb_first(&proc->nodes))) {
53413e7d 4016 struct binder_node *node;
355b0502 4017
53413e7d 4018 node = rb_entry(n, struct binder_node, rb_node);
355b0502 4019 nodes++;
96dd75d9
TK
4020 /*
4021 * take a temporary ref on the node before
4022 * calling binder_node_release() which will either
4023 * kfree() the node or call binder_put_node()
4024 */
4025 binder_inc_node_tmpref(node);
355b0502 4026 rb_erase(&node->rb_node, &proc->nodes);
ec49bb00 4027 incoming_refs = binder_node_release(node, incoming_refs);
355b0502 4028 }
53413e7d 4029
355b0502
GKH
4030 outgoing_refs = 0;
4031 while ((n = rb_first(&proc->refs_by_desc))) {
53413e7d
ME
4032 struct binder_ref *ref;
4033
4034 ref = rb_entry(n, struct binder_ref, rb_node_desc);
355b0502 4035 outgoing_refs++;
f7d87412
TK
4036 binder_cleanup_ref(ref);
4037 binder_free_ref(ref);
355b0502 4038 }
53413e7d 4039
355b0502 4040 binder_release_work(&proc->todo);
675d66b0 4041 binder_release_work(&proc->delivered_death);
355b0502 4042
355b0502 4043 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
467545d8 4044 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
c07c933f 4045 __func__, proc->pid, threads, nodes, incoming_refs,
467545d8 4046 outgoing_refs, active_transactions);
355b0502 4047
e482ec39 4048 binder_proc_dec_tmpref(proc);
355b0502
GKH
4049}
4050
4051static void binder_deferred_func(struct work_struct *work)
4052{
4053 struct binder_proc *proc;
4054 struct files_struct *files;
4055
4056 int defer;
10f62861 4057
355b0502 4058 do {
ec49bb00
TK
4059 binder_lock(__func__);
4060 mutex_lock(&binder_deferred_lock);
4061 if (!hlist_empty(&binder_deferred_list)) {
4062 proc = hlist_entry(binder_deferred_list.first,
355b0502
GKH
4063 struct binder_proc, deferred_work_node);
4064 hlist_del_init(&proc->deferred_work_node);
4065 defer = proc->deferred_work;
4066 proc->deferred_work = 0;
4067 } else {
4068 proc = NULL;
4069 defer = 0;
4070 }
ec49bb00 4071 mutex_unlock(&binder_deferred_lock);
355b0502
GKH
4072
4073 files = NULL;
4074 if (defer & BINDER_DEFERRED_PUT_FILES) {
4075 files = proc->files;
4076 if (files)
4077 proc->files = NULL;
4078 }
4079
4080 if (defer & BINDER_DEFERRED_FLUSH)
4081 binder_deferred_flush(proc);
4082
4083 if (defer & BINDER_DEFERRED_RELEASE)
4084 binder_deferred_release(proc); /* frees proc */
4085
ec49bb00 4086 binder_unlock(__func__);
355b0502
GKH
4087 if (files)
4088 put_files_struct(files);
4089 } while (proc);
4090}
ec49bb00 4091static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
355b0502
GKH
4092
4093static void
4094binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
4095{
ec49bb00 4096 mutex_lock(&binder_deferred_lock);
355b0502
GKH
4097 proc->deferred_work |= defer;
4098 if (hlist_unhashed(&proc->deferred_work_node)) {
4099 hlist_add_head(&proc->deferred_work_node,
ec49bb00
TK
4100 &binder_deferred_list);
4101 queue_work(binder_deferred_workqueue, &binder_deferred_work);
355b0502 4102 }
ec49bb00 4103 mutex_unlock(&binder_deferred_lock);
355b0502
GKH
4104}
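
/*
 * Deferred work coalesces per proc: the flags OR together and a single
 * queued work item drains them all. For example (illustrative):
 *
 *	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
 *	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
 *	// binder_deferred_func() runs once for this proc with
 *	// defer == (BINDER_DEFERRED_FLUSH | BINDER_DEFERRED_RELEASE)
 */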
4105
5249f488
AH
4106static void print_binder_transaction(struct seq_file *m, const char *prefix,
4107 struct binder_transaction *t)
4108{
e482ec39 4109 spin_lock(&t->lock);
5249f488
AH
4110 seq_printf(m,
4111 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
4112 prefix, t->debug_id, t,
4113 t->from ? t->from->proc->pid : 0,
4114 t->from ? t->from->pid : 0,
4115 t->to_proc ? t->to_proc->pid : 0,
4116 t->to_thread ? t->to_thread->pid : 0,
4117 t->code, t->flags, t->priority, t->need_reply);
e482ec39
TK
4118 spin_unlock(&t->lock);
4119
355b0502 4120 if (t->buffer == NULL) {
5249f488
AH
4121 seq_puts(m, " buffer free\n");
4122 return;
355b0502 4123 }
5249f488
AH
4124 if (t->buffer->target_node)
4125 seq_printf(m, " node %d",
4126 t->buffer->target_node->debug_id);
4127 seq_printf(m, " size %zd:%zd data %p\n",
4128 t->buffer->data_size, t->buffer->offsets_size,
4129 t->buffer->data);
355b0502
GKH
4130}
4131
5249f488
AH
4132static void print_binder_work(struct seq_file *m, const char *prefix,
4133 const char *transaction_prefix,
4134 struct binder_work *w)
355b0502
GKH
4135{
4136 struct binder_node *node;
4137 struct binder_transaction *t;
4138
4139 switch (w->type) {
4140 case BINDER_WORK_TRANSACTION:
4141 t = container_of(w, struct binder_transaction, work);
5249f488 4142 print_binder_transaction(m, transaction_prefix, t);
355b0502 4143 break;
3a822b33
TK
4144 case BINDER_WORK_RETURN_ERROR: {
4145 struct binder_error *e = container_of(
4146 w, struct binder_error, work);
4147
4148 seq_printf(m, "%stransaction error: %u\n",
4149 prefix, e->cmd);
4150 } break;
355b0502 4151 case BINDER_WORK_TRANSACTION_COMPLETE:
5249f488 4152 seq_printf(m, "%stransaction complete\n", prefix);
355b0502
GKH
4153 break;
4154 case BINDER_WORK_NODE:
4155 node = container_of(w, struct binder_node, work);
da49889d
AH
4156 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
4157 prefix, node->debug_id,
4158 (u64)node->ptr, (u64)node->cookie);
355b0502
GKH
4159 break;
4160 case BINDER_WORK_DEAD_BINDER:
5249f488 4161 seq_printf(m, "%shas dead binder\n", prefix);
355b0502
GKH
4162 break;
4163 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5249f488 4164 seq_printf(m, "%shas cleared dead binder\n", prefix);
355b0502
GKH
4165 break;
4166 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5249f488 4167 seq_printf(m, "%shas cleared death notification\n", prefix);
355b0502
GKH
4168 break;
4169 default:
5249f488 4170 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
355b0502
GKH
4171 break;
4172 }
355b0502
GKH
4173}
4174
5249f488
AH
4175static void print_binder_thread(struct seq_file *m,
4176 struct binder_thread *thread,
4177 int print_always)
355b0502
GKH
4178{
4179 struct binder_transaction *t;
4180 struct binder_work *w;
5249f488
AH
4181 size_t start_pos = m->count;
4182 size_t header_pos;
355b0502 4183
e482ec39 4184 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
afda44d0 4185 thread->pid, thread->looper,
e482ec39
TK
4186 thread->looper_need_return,
4187 atomic_read(&thread->tmp_ref));
5249f488 4188 header_pos = m->count;
355b0502
GKH
4189 t = thread->transaction_stack;
4190 while (t) {
355b0502 4191 if (t->from == thread) {
5249f488
AH
4192 print_binder_transaction(m,
4193 " outgoing transaction", t);
355b0502
GKH
4194 t = t->from_parent;
4195 } else if (t->to_thread == thread) {
5249f488
AH
4196 print_binder_transaction(m,
4197 " incoming transaction", t);
355b0502
GKH
4198 t = t->to_parent;
4199 } else {
5249f488 4200 print_binder_transaction(m, " bad transaction", t);
355b0502
GKH
4201 t = NULL;
4202 }
4203 }
4204 list_for_each_entry(w, &thread->todo, entry) {
5249f488 4205 print_binder_work(m, " ", " pending transaction", w);
355b0502 4206 }
5249f488
AH
4207 if (!print_always && m->count == header_pos)
4208 m->count = start_pos;
355b0502
GKH
4209}
4210
5249f488 4211static void print_binder_node(struct seq_file *m, struct binder_node *node)
355b0502
GKH
4212{
4213 struct binder_ref *ref;
355b0502
GKH
4214 struct binder_work *w;
4215 int count;
4216
4217 count = 0;
b67bfe0d 4218 hlist_for_each_entry(ref, &node->refs, node_entry)
355b0502
GKH
4219 count++;
4220
96dd75d9 4221 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
da49889d 4222 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5249f488
AH
4223 node->has_strong_ref, node->has_weak_ref,
4224 node->local_strong_refs, node->local_weak_refs,
96dd75d9 4225 node->internal_strong_refs, count, node->tmp_refs);
355b0502 4226 if (count) {
5249f488 4227 seq_puts(m, " proc");
b67bfe0d 4228 hlist_for_each_entry(ref, &node->refs, node_entry)
5249f488 4229 seq_printf(m, " %d", ref->proc->pid);
355b0502 4230 }
5249f488
AH
4231 seq_puts(m, "\n");
4232 list_for_each_entry(w, &node->async_todo, entry)
4233 print_binder_work(m, " ",
4234 " pending async transaction", w);
355b0502
GKH
4235}
4236
5249f488 4237static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
355b0502 4238{
f7d87412
TK
4239 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
4240 ref->data.debug_id, ref->data.desc,
4241 ref->node->proc ? "" : "dead ",
4242 ref->node->debug_id, ref->data.strong,
4243 ref->data.weak, ref->death);
355b0502
GKH
4244}
4245
5249f488
AH
4246static void print_binder_proc(struct seq_file *m,
4247 struct binder_proc *proc, int print_all)
355b0502
GKH
4248{
4249 struct binder_work *w;
4250 struct rb_node *n;
5249f488
AH
4251 size_t start_pos = m->count;
4252 size_t header_pos;
4253
4254 seq_printf(m, "proc %d\n", proc->pid);
8b980bee 4255 seq_printf(m, "context %s\n", proc->context->name);
5249f488
AH
4256 header_pos = m->count;
4257
4258 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
4259 print_binder_thread(m, rb_entry(n, struct binder_thread,
4260 rb_node), print_all);
4261 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
355b0502
GKH
4262 struct binder_node *node = rb_entry(n, struct binder_node,
4263 rb_node);
4264 if (print_all || node->has_async_transaction)
5249f488 4265 print_binder_node(m, node);
355b0502
GKH
4266 }
4267 if (print_all) {
4268 for (n = rb_first(&proc->refs_by_desc);
5249f488 4269 n != NULL;
355b0502 4270 n = rb_next(n))
5249f488
AH
4271 print_binder_ref(m, rb_entry(n, struct binder_ref,
4272 rb_node_desc));
355b0502 4273 }
467545d8 4274 binder_alloc_print_allocated(m, &proc->alloc);
5249f488
AH
4275 list_for_each_entry(w, &proc->todo, entry)
4276 print_binder_work(m, " ", " pending transaction", w);
355b0502 4277 list_for_each_entry(w, &proc->delivered_death, entry) {
5249f488 4278 seq_puts(m, " has delivered dead binder\n");
355b0502
GKH
4279 break;
4280 }
5249f488
AH
4281 if (!print_all && m->count == header_pos)
4282 m->count = start_pos;
355b0502
GKH
4283}
4284
167bccbd 4285static const char * const binder_return_strings[] = {
355b0502
GKH
4286 "BR_ERROR",
4287 "BR_OK",
4288 "BR_TRANSACTION",
4289 "BR_REPLY",
4290 "BR_ACQUIRE_RESULT",
4291 "BR_DEAD_REPLY",
4292 "BR_TRANSACTION_COMPLETE",
4293 "BR_INCREFS",
4294 "BR_ACQUIRE",
4295 "BR_RELEASE",
4296 "BR_DECREFS",
4297 "BR_ATTEMPT_ACQUIRE",
4298 "BR_NOOP",
4299 "BR_SPAWN_LOOPER",
4300 "BR_FINISHED",
4301 "BR_DEAD_BINDER",
4302 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4303 "BR_FAILED_REPLY"
4304};
4305
167bccbd 4306static const char * const binder_command_strings[] = {
355b0502
GKH
4307 "BC_TRANSACTION",
4308 "BC_REPLY",
4309 "BC_ACQUIRE_RESULT",
4310 "BC_FREE_BUFFER",
4311 "BC_INCREFS",
4312 "BC_ACQUIRE",
4313 "BC_RELEASE",
4314 "BC_DECREFS",
4315 "BC_INCREFS_DONE",
4316 "BC_ACQUIRE_DONE",
4317 "BC_ATTEMPT_ACQUIRE",
4318 "BC_REGISTER_LOOPER",
4319 "BC_ENTER_LOOPER",
4320 "BC_EXIT_LOOPER",
4321 "BC_REQUEST_DEATH_NOTIFICATION",
4322 "BC_CLEAR_DEATH_NOTIFICATION",
dd9bc4f9
MC
4323 "BC_DEAD_BINDER_DONE",
4324 "BC_TRANSACTION_SG",
4325 "BC_REPLY_SG",
355b0502
GKH
4326};
4327
167bccbd 4328static const char * const binder_objstat_strings[] = {
355b0502
GKH
4329 "proc",
4330 "thread",
4331 "node",
4332 "ref",
4333 "death",
4334 "transaction",
4335 "transaction_complete"
4336};
4337
5249f488 4338static void print_binder_stats(struct seq_file *m, const char *prefix,
ec49bb00 4339 struct binder_stats *stats)
355b0502
GKH
4340{
4341 int i;
4342
4343 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5249f488 4344 ARRAY_SIZE(binder_command_strings));
355b0502 4345 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
f716ecfc
BJS
4346 int temp = atomic_read(&stats->bc[i]);
4347
4348 if (temp)
5249f488 4349 seq_printf(m, "%s%s: %d\n", prefix,
f716ecfc 4350 binder_command_strings[i], temp);
355b0502
GKH
4351 }
4352
4353 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5249f488 4354 ARRAY_SIZE(binder_return_strings));
355b0502 4355 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
f716ecfc
BJS
4356 int temp = atomic_read(&stats->br[i]);
4357
4358 if (temp)
5249f488 4359 seq_printf(m, "%s%s: %d\n", prefix,
f716ecfc 4360 binder_return_strings[i], temp);
355b0502
GKH
4361 }
4362
ec49bb00 4363 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5249f488 4364 ARRAY_SIZE(binder_objstat_strings));
ec49bb00
TK
4365 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
4366 ARRAY_SIZE(stats->obj_deleted));
4367 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
f716ecfc
BJS
4368 int created = atomic_read(&stats->obj_created[i]);
4369 int deleted = atomic_read(&stats->obj_deleted[i]);
4370
4371 if (created || deleted)
4372 seq_printf(m, "%s%s: active %d total %d\n",
4373 prefix,
ec49bb00 4374 binder_objstat_strings[i],
f716ecfc
BJS
4375 created - deleted,
4376 created);
355b0502 4377 }
467545d8
TK
4378}
4379
5249f488
AH
4380static void print_binder_proc_stats(struct seq_file *m,
4381 struct binder_proc *proc)
355b0502
GKH
4382{
4383 struct binder_work *w;
4384 struct rb_node *n;
4385 int count, strong, weak;
4386
5249f488 4387 seq_printf(m, "proc %d\n", proc->pid);
8b980bee 4388 seq_printf(m, "context %s\n", proc->context->name);
355b0502
GKH
4389 count = 0;
4390 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
4391 count++;
5249f488
AH
4392 seq_printf(m, " threads: %d\n", count);
4393 seq_printf(m, " requested threads: %d+%d/%d\n"
355b0502
GKH
4394 " ready threads %d\n"
4395 " free async space %zd\n", proc->requested_threads,
4396 proc->requested_threads_started, proc->max_threads,
467545d8
TK
4397 proc->ready_threads,
4398 binder_alloc_get_free_async_space(&proc->alloc));
355b0502
GKH
4399 count = 0;
4400 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
4401 count++;
5249f488 4402 seq_printf(m, " nodes: %d\n", count);
355b0502
GKH
4403 count = 0;
4404 strong = 0;
4405 weak = 0;
4406 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
4407 struct binder_ref *ref = rb_entry(n, struct binder_ref,
4408 rb_node_desc);
4409 count++;
f7d87412
TK
4410 strong += ref->data.strong;
4411 weak += ref->data.weak;
355b0502 4412 }
5249f488 4413 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
355b0502 4414
467545d8 4415 count = binder_alloc_get_allocated_count(&proc->alloc);
5249f488 4416 seq_printf(m, " buffers: %d\n", count);
355b0502
GKH
4417
4418 count = 0;
4419 list_for_each_entry(w, &proc->todo, entry) {
4420 switch (w->type) {
4421 case BINDER_WORK_TRANSACTION:
4422 count++;
4423 break;
4424 default:
4425 break;
4426 }
4427 }
5249f488 4428 seq_printf(m, " pending transactions: %d\n", count);
355b0502 4429
ec49bb00 4430 print_binder_stats(m, " ", &proc->stats);
355b0502
GKH
4431}
4432
4433
5249f488 4434static int binder_state_show(struct seq_file *m, void *unused)
355b0502
GKH
4435{
4436 struct binder_proc *proc;
355b0502 4437 struct binder_node *node;
355b0502 4438
a19f3efc 4439 binder_lock(__func__);
355b0502 4440
ec49bb00 4441 seq_puts(m, "binder state:\n");
355b0502 4442
3490fdcb 4443 spin_lock(&binder_dead_nodes_lock);
ec49bb00
TK
4444 if (!hlist_empty(&binder_dead_nodes))
4445 seq_puts(m, "dead nodes:\n");
4446 hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
4447 print_binder_node(m, node);
3490fdcb 4448 spin_unlock(&binder_dead_nodes_lock);
d6bbb327 4449
3490fdcb 4450 mutex_lock(&binder_procs_lock);
ec49bb00
TK
4451 hlist_for_each_entry(proc, &binder_procs, proc_node)
4452 print_binder_proc(m, proc, 1);
3490fdcb 4453 mutex_unlock(&binder_procs_lock);
a19f3efc 4454 binder_unlock(__func__);
5249f488 4455 return 0;
355b0502
GKH
4456}
4457
5249f488 4458static int binder_stats_show(struct seq_file *m, void *unused)
355b0502
GKH
4459{
4460 struct binder_proc *proc;
355b0502 4461
a19f3efc 4462 binder_lock(__func__);
355b0502 4463
5249f488 4464 seq_puts(m, "binder stats:\n");
355b0502 4465
ec49bb00 4466 print_binder_stats(m, "", &binder_stats);
355b0502 4467
3490fdcb 4468 mutex_lock(&binder_procs_lock);
ec49bb00
TK
4469 hlist_for_each_entry(proc, &binder_procs, proc_node)
4470 print_binder_proc_stats(m, proc);
3490fdcb 4471 mutex_unlock(&binder_procs_lock);
a19f3efc 4472 binder_unlock(__func__);
5249f488 4473 return 0;
355b0502
GKH
4474}
4475
5249f488 4476static int binder_transactions_show(struct seq_file *m, void *unused)
355b0502
GKH
4477{
4478 struct binder_proc *proc;
355b0502 4479
a19f3efc 4480 binder_lock(__func__);
d6bbb327 4481
ec49bb00 4482 seq_puts(m, "binder transactions:\n");
3490fdcb 4483 mutex_lock(&binder_procs_lock);
ec49bb00
TK
4484 hlist_for_each_entry(proc, &binder_procs, proc_node)
4485 print_binder_proc(m, proc, 0);
3490fdcb 4486 mutex_unlock(&binder_procs_lock);
a19f3efc 4487 binder_unlock(__func__);
5249f488 4488 return 0;
355b0502
GKH
4489}
4490
5249f488 4491static int binder_proc_show(struct seq_file *m, void *unused)
355b0502 4492{
aa29c32d 4493 struct binder_proc *itr;
8b980bee 4494 int pid = (unsigned long)m->private;
355b0502 4495
a19f3efc 4496 binder_lock(__func__);
aa29c32d 4497
3490fdcb 4498 mutex_lock(&binder_procs_lock);
ec49bb00
TK
4499 hlist_for_each_entry(itr, &binder_procs, proc_node) {
4500 if (itr->pid == pid) {
4501 seq_puts(m, "binder proc state:\n");
4502 print_binder_proc(m, itr, 1);
aa29c32d
RA
4503 }
4504 }
3490fdcb
TK
4505 mutex_unlock(&binder_procs_lock);
4506
a19f3efc 4507 binder_unlock(__func__);
5249f488 4508 return 0;
355b0502
GKH
4509}
4510
5249f488 4511static void print_binder_transaction_log_entry(struct seq_file *m,
355b0502
GKH
4512 struct binder_transaction_log_entry *e)
4513{
0f32aeb3
TK
4514 int debug_id = READ_ONCE(e->debug_id_done);
4515 /*
4516 * read barrier to guarantee debug_id_done read before
4517 * we print the log values
4518 */
4519 smp_rmb();
5249f488 4520 seq_printf(m,
0f32aeb3 4521 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5249f488
AH
4522 e->debug_id, (e->call_type == 2) ? "reply" :
4523 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
8b980bee 4524 e->from_thread, e->to_proc, e->to_thread, e->context_name,
0a0fdc1f
TK
4525 e->to_node, e->target_handle, e->data_size, e->offsets_size,
4526 e->return_error, e->return_error_param,
4527 e->return_error_line);
0f32aeb3
TK
4528 /*
4529 	 * read barrier to guarantee the re-read of debug_id_done
4530 	 * happens after we are done printing the fields of the entry
4531 */
4532 smp_rmb();
4533 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
4534 "\n" : " (incomplete)\n");
355b0502
GKH
4535}
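
/*
 * The smp_rmb() pair above is meant to mirror the writer, which
 * (roughly) clears debug_id_done before filling an entry and stores
 * the final debug_id after a write barrier. A sketch of the assumed
 * ordering, not the literal writer code:
 *
 *	e->debug_id_done = 0;
 *	// ... fill in the entry fields ...
 *	smp_wmb();
 *	WRITE_ONCE(e->debug_id_done, t_debug_id);
 *
 * If both reads of debug_id_done agree and are nonzero, the fields in
 * between form a consistent snapshot; otherwise " (incomplete)" is
 * printed.
 */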
4536
ec49bb00 4537static int binder_transaction_log_show(struct seq_file *m, void *unused)
355b0502 4538{
ec49bb00 4539 struct binder_transaction_log *log = m->private;
0f32aeb3
TK
4540 unsigned int log_cur = atomic_read(&log->cur);
4541 unsigned int count;
4542 unsigned int cur;
355b0502 4543 int i;
ec49bb00 4544
0f32aeb3
TK
4545 count = log_cur + 1;
4546 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
4547 0 : count % ARRAY_SIZE(log->entry);
4548 if (count > ARRAY_SIZE(log->entry) || log->full)
4549 count = ARRAY_SIZE(log->entry);
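	/*
	 * Worked example, assuming a 32-entry log that has wrapped
	 * (log->full) with log_cur == 40: count becomes 41, cur =
	 * 41 % 32 = 9, count is clamped to 32, and the loop below
	 * prints indices 9..40 mod 32, i.e. the 32 most recent
	 * entries, oldest first.
	 */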
4550 for (i = 0; i < count; i++) {
4551 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
4552
4553 print_binder_transaction_log_entry(m, &log->entry[index]);
355b0502 4554 }
5249f488 4555 return 0;
355b0502
GKH
4556}
4557
4558static const struct file_operations binder_fops = {
4559 .owner = THIS_MODULE,
4560 .poll = binder_poll,
4561 .unlocked_ioctl = binder_ioctl,
da49889d 4562 .compat_ioctl = binder_ioctl,
355b0502
GKH
4563 .mmap = binder_mmap,
4564 .open = binder_open,
4565 .flush = binder_flush,
4566 .release = binder_release,
4567};
4568
5249f488
AH
4569BINDER_DEBUG_ENTRY(state);
4570BINDER_DEBUG_ENTRY(stats);
4571BINDER_DEBUG_ENTRY(transactions);
4572BINDER_DEBUG_ENTRY(transaction_log);
4573
04e3812e
MC
4574static int __init init_binder_device(const char *name)
4575{
4576 int ret;
4577 struct binder_device *binder_device;
4578
4579 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
4580 if (!binder_device)
4581 return -ENOMEM;
4582
4583 binder_device->miscdev.fops = &binder_fops;
4584 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
4585 binder_device->miscdev.name = name;
4586
ec49bb00
TK
4587 binder_device->context.binder_context_mgr_uid = INVALID_UID;
4588 binder_device->context.name = name;
3490fdcb 4589 mutex_init(&binder_device->context.context_mgr_node_lock);
04e3812e
MC
4590
4591 ret = misc_register(&binder_device->miscdev);
4592 if (ret < 0) {
ec49bb00
TK
4593 kfree(binder_device);
4594 return ret;
04e3812e
MC
4595 }
4596
4597 hlist_add_head(&binder_device->hlist, &binder_devices);
4598
4599 return ret;
4600}
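
/*
 * Example: with the binder_devices module parameter set to
 * "binder,hwbinder,vndbinder" (an illustrative value), binder_init()
 * below calls init_binder_device() once per name, registering
 * /dev/binder, /dev/hwbinder and /dev/vndbinder, each with its own
 * binder_context.
 */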
4601
355b0502
GKH
4602static int __init binder_init(void)
4603{
ec49bb00 4604 int ret;
04e3812e
MC
4605 char *device_name, *device_names;
4606 struct binder_device *device;
4607 struct hlist_node *tmp;
355b0502 4608
0f32aeb3
TK
4609 atomic_set(&binder_transaction_log.cur, ~0U);
4610 atomic_set(&binder_transaction_log_failed.cur, ~0U);
ec49bb00
TK
4611 binder_deferred_workqueue = create_singlethread_workqueue("binder");
4612 if (!binder_deferred_workqueue)
3c762a49
AH
4613 return -ENOMEM;
4614
16b66554
AH
4615 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
4616 if (binder_debugfs_dir_entry_root)
4617 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
4618 binder_debugfs_dir_entry_root);
04e3812e 4619
16b66554
AH
4620 if (binder_debugfs_dir_entry_root) {
4621 debugfs_create_file("state",
4622 S_IRUGO,
4623 binder_debugfs_dir_entry_root,
4624 NULL,
4625 &binder_state_fops);
4626 debugfs_create_file("stats",
4627 S_IRUGO,
4628 binder_debugfs_dir_entry_root,
4629 NULL,
4630 &binder_stats_fops);
4631 debugfs_create_file("transactions",
4632 S_IRUGO,
4633 binder_debugfs_dir_entry_root,
4634 NULL,
4635 &binder_transactions_fops);
4636 debugfs_create_file("transaction_log",
4637 S_IRUGO,
4638 binder_debugfs_dir_entry_root,
ec49bb00 4639 &binder_transaction_log,
16b66554
AH
4640 &binder_transaction_log_fops);
4641 debugfs_create_file("failed_transaction_log",
4642 S_IRUGO,
4643 binder_debugfs_dir_entry_root,
ec49bb00
TK
4644 &binder_transaction_log_failed,
4645 &binder_transaction_log_fops);
4646 }
4647
4648 /*
4649 	 * Copy the module parameter string, because we don't want to
4650 * tokenize it in-place.
4651 */
4652 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
4653 if (!device_names) {
4654 ret = -ENOMEM;
4655 goto err_alloc_device_names_failed;
4656 }
4657 strcpy(device_names, binder_devices_param);
4658
4659 while ((device_name = strsep(&device_names, ","))) {
4660 ret = init_binder_device(device_name);
4661 if (ret)
4662 goto err_init_binder_device_failed;
04e3812e
MC
4663 }
4664
4665 return ret;
4666
4667err_init_binder_device_failed:
4668 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
4669 misc_deregister(&device->miscdev);
4670 hlist_del(&device->hlist);
ec49bb00 4671 kfree(device);
04e3812e 4672 }
ec49bb00
TK
4673err_alloc_device_names_failed:
4674 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
4675
4676 destroy_workqueue(binder_deferred_workqueue);
04e3812e 4677
355b0502
GKH
4678 return ret;
4679}
4680
4681device_initcall(binder_init);
4682
975a1ac9
AH
4683#define CREATE_TRACE_POINTS
4684#include "binder_trace.h"
4685
355b0502 4686MODULE_LICENSE("GPL v2");