binder: add spinlocks to protect todo lists
drivers/android/binder.c
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel.
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->nodes) and all todo lists associated
 *    with the binder_proc (proc->todo, thread->todo,
 *    proc->delivered_death and node->async_todo).
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel.
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate the required
 * lock in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
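/*
 * Illustrative sketch (not part of the original driver): code that
 * needs more than one of these locks must take them strictly in the
 * documented order, using the helpers named above, e.g.:
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_node_lock(node);		// 2) node->lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *	// ... update todo lists / node state ...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */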

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static DEFINE_MUTEX(binder_main_lock);

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
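/*
 * Illustrative expansion (not spelled out in the source): for "proc",
 * the macro above generates binder_proc_open(), wiring
 * binder_proc_show() into seq_file, plus a binder_proc_fops table
 * suitable for registering a debugfs file:
 *
 *	static int binder_proc_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, binder_proc_show, inode->i_private);
 *	}
 *
 *	static const struct file_operations binder_proc_fops = { ... };
 */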

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
	BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

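/*
 * Illustrative usage (not from the original file): output is gated by
 * the debug_mask module parameter, and binder_user_error() additionally
 * bumps binder_stop_on_user_error to 2 when the stop flag is set:
 *
 *	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%d: open\n", proc->pid);
 *	binder_user_error("%d: invalid handle\n", proc->pid);
 */
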
#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};
struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = 1;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by the memset() below.
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
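/*
 * Illustrative reader-side sketch (an assumption; the reader is not
 * shown in this section): the smp_wmb() above pairs with a read
 * barrier before trusting an entry's fields, e.g.:
 *
 *	int done = READ_ONCE(e->debug_id_done);
 *
 *	smp_rmb();
 *	// e's other fields are stable only if done is non-zero
 */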

struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;

	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};

/**
 * struct binder_work - work enqueued on a worklist
 * @entry:	node enqueued on list
 * @type:	type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:	unique ID for the ref
 * @desc:	unique userspace handle for ref
 * @strong:	strong ref count (debugging only if not locked)
 * @weak:	weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:	 binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:	 list entry for node->refs list in target node
 * @proc:	 binder_proc containing ref
 * @node:	 binder_node of target node. When cleaning up a
 *		 ref for deletion in binder_cleanup_ref, a non-NULL
 *		 @node indicates the node must be freed
 * @death:	 pointer to death notification (ref_death) if requested
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES = 0x01,
	BINDER_DEFERRED_FLUSH = 0x02,
	BINDER_DEFERRED_RELEASE = 0x04,
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 * @refs_by_node:         rbtree of refs ordered by ref->node
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (invariant after initialized)
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @wait:                 wait queue head to wait for proc work
 *                        (invariant after initialized)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 * @requested_threads_started: number of binder threads started
 * @ready_threads:        number of threads waiting for proc work
 * @tmp_ref:              temporary reference to indicate proc is in use
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int ready_threads;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED = 0x02,
	BINDER_LOOPER_STATE_EXITED = 0x04,
	BINDER_LOOPER_STATE_INVALID = 0x08,
	BINDER_LOOPER_STATE_WAITING = 0x10,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	long priority;
	long saved_priority;
	kuid_t sender_euid;
	/**
	 * @lock:  protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:	struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:	struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:	struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:	struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:	struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:	struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:	binder_proc associated with list
 * @list:	list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_work() - Add an item to the work list
 * @proc:	 binder_proc associated with list
 * @work:	 struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 */
static void
binder_enqueue_work(struct binder_proc *proc,
		    struct binder_work *work,
		    struct list_head *target_list)
{
	binder_inner_proc_lock(proc);
	binder_enqueue_work_ilocked(work, target_list);
	binder_inner_proc_unlock(proc);
}

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:	binder_proc associated with list
 * @work:	struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:	binder_proc associated with list
 * @list:	list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to the dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}

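/*
 * Illustrative sketch (not part of this driver): a typical producer
 * pairs an enqueue with a wakeup of the target's waitqueue, e.g.:
 *
 *	binder_enqueue_work(proc, &t->work, &proc->todo);
 *	wake_up_interruptible(&proc->wait);
 *
 * The _ilocked variants are for callers that already hold
 * proc->inner_lock, per the suffix convention described at the top
 * of this file.
 */
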
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref(struct binder_node *node);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}

static inline void binder_lock(const char *tag)
{
	trace_binder_lock(tag);
	mutex_lock(&binder_main_lock);
	trace_binder_locked(tag);
}

static inline void binder_unlock(const char *tag)
{
	trace_binder_unlock(tag);
	mutex_unlock(&binder_main_lock);
}

static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   binder_uintptr_t ptr,
					   binder_uintptr_t cookie)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else
			return NULL;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (node == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);
	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_ilocked(struct binder_node *node, int strong,
				   int internal,
				   struct list_head *target_list)
{
	if (node->proc)
		BUG_ON(!spin_is_locked(&node->proc->inner_lock));
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			binder_dequeue_work_ilocked(&node->work);
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	if (node->proc)
		binder_inner_proc_lock(node->proc);
	ret = binder_inc_node_ilocked(node, strong, internal, target_list);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);

	return ret;
}

static bool binder_dec_node_ilocked(struct binder_node *node,
				    int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	if (proc)
		BUG_ON(!spin_is_locked(&proc->inner_lock));
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	if (node->proc)
		binder_inner_proc_lock(node->proc);
	free_node = binder_dec_node_ilocked(node, strong, internal);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);

	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_ilocked(node, 0, 1);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

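/*
 * Illustrative usage (a sketch, not from this file): lookups such as
 * binder_get_node() take an implicit tmp_ref on the returned node, so
 * callers bracket their use of the node with binder_put_node():
 *
 *	node = binder_get_node(proc, ptr);
 *	if (node) {
 *		// node cannot be freed while we use it
 *		...
 *		binder_put_node(node);
 *	}
 */
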
static struct binder_ref *binder_get_ref(struct binder_proc *proc,
					 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node,
						  struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	return new_ref;
}

static void binder_cleanup_ref(struct binder_ref *ref)
{
	bool delete_node = false;
	struct binder_proc *node_proc = ref->node->proc;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	if (node_proc)
		binder_inner_proc_lock(node_proc);
	if (ref->data.strong)
		binder_dec_node_ilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_ilocked(ref->node, 0, 1);
	if (node_proc)
		binder_inner_proc_unlock(node_proc);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref() - increment the ref for given handle
 * @ref:	 ref to be incremented
 * @strong:	 if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref.
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref(struct binder_ref *ref, int strong,
			  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * TODO: kfree is avoided here since an upcoming patch
 * will put this under a lock.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref(ref);
		/*
		 * TODO: we could kfree(ref) here, but an upcoming
		 * patch will call this with a lock held, so we
		 * return an indication that the ref should be
		 * freed.
		 */
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	ref = binder_get_ref(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;

	return node;

err_no_ref:
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	ref = binder_get_ref(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref(ref, strong);

	if (rdata)
		*rdata = ref->data;

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}

/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	ref = binder_get_ref_for_node(proc, node, NULL);
	if (!ref) {
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		ref = binder_get_ref_for_node(proc, node, new_ref);
	}
	ret = binder_inc_ref(ref, strong, target_list);
	*rdata = ref->data;
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

static void binder_pop_transaction(struct binder_thread *target_thread,
				   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 *
	 * TODO: future patch adds locking to ensure that the
	 * check of tmp_ref and is_dead is done with a lock held
	 */
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_free_thread(thread);
		return;
	}
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_free_proc(proc);
		return;
	}
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

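/*
 * Illustrative usage (a sketch, not from this file): callers must
 * balance the temporary reference taken above, e.g.:
 *
 *	struct binder_thread *from = binder_get_txn_from(t);
 *
 *	if (from) {
 *		// from cannot be freed while we use it
 *		...
 *		binder_thread_dec_tmpref(from);
 *	}
 *
 * binder_send_failed_reply() below follows exactly this pattern.
 */
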
static void binder_free_transaction(struct binder_transaction *t)
{
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_work(
						target_thread->proc,
						&target_thread->reply_error.work,
						&target_thread->todo);
				wake_up_interruptible(&target_thread->wait);
			} else {
				WARN(1, "Unexpected reply error: %u\n",
				     target_thread->reply_error.cmd);
			}
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}

feba3900
MC
1698/**
1699 * binder_validate_object() - checks for a valid metadata object in a buffer.
1700 * @buffer: binder_buffer that we're parsing.
1701 * @offset: offset in the buffer at which to validate an object.
1702 *
1703 * Return: If there's a valid metadata object at @offset in @buffer, the
1704 * size of that object. Otherwise, it returns zero.
1705 */
1706static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
1707{
1708 /* Check if we can read a header first */
1709 struct binder_object_header *hdr;
1710 size_t object_size = 0;
1711
1712 if (offset > buffer->data_size - sizeof(*hdr) ||
1713 buffer->data_size < sizeof(*hdr) ||
1714 !IS_ALIGNED(offset, sizeof(u32)))
1715 return 0;
1716
1717 /* Ok, now see if we can read a complete object. */
1718 hdr = (struct binder_object_header *)(buffer->data + offset);
1719 switch (hdr->type) {
1720 case BINDER_TYPE_BINDER:
1721 case BINDER_TYPE_WEAK_BINDER:
1722 case BINDER_TYPE_HANDLE:
1723 case BINDER_TYPE_WEAK_HANDLE:
1724 object_size = sizeof(struct flat_binder_object);
1725 break;
1726 case BINDER_TYPE_FD:
1727 object_size = sizeof(struct binder_fd_object);
1728 break;
7980240b
MC
1729 case BINDER_TYPE_PTR:
1730 object_size = sizeof(struct binder_buffer_object);
1731 break;
def95c73
MC
1732 case BINDER_TYPE_FDA:
1733 object_size = sizeof(struct binder_fd_array_object);
1734 break;
feba3900
MC
1735 default:
1736 return 0;
1737 }
1738 if (offset <= buffer->data_size - object_size &&
1739 buffer->data_size >= object_size)
1740 return object_size;
1741 else
1742 return 0;
1743}
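
/*
 * Worked example (illustrative): the two-part bounds check above is written
 * to be safe against unsigned wrap-around. With sizeof(*hdr) == 8 and a
 * buffer where data_size == 4, "data_size - sizeof(*hdr)" wraps to a huge
 * value, so the first clause alone would accept the buffer; the companion
 * "data_size < sizeof(*hdr)" clause rejects it outright:
 *
 *	size_t data_size = 4, offset = 0;
 *
 *	if (offset > data_size - 8)	// 4 - 8 wraps to SIZE_MAX - 3:
 *		return 0;		//   would NOT trigger on its own
 *	if (data_size < 8)		// this check is what saves us
 *		return 0;
 */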

/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b:		binder_buffer containing the object
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start:	points to the start of the offset array
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 */
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
							binder_size_t index,
							binder_size_t *start,
							binder_size_t num_valid)
{
	struct binder_buffer_object *buffer_obj;
	binder_size_t *offp;

	if (index >= num_valid)
		return NULL;

	offp = start + index;
	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
		return NULL;

	return buffer_obj;
}

/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b:			transaction buffer
 * @objects_start:	start of objects buffer
 * @buffer:		binder_buffer_object in which to fix up
 * @offset:		start offset in @buffer to fix up
 * @last_obj:		last binder_buffer_object that we fixed up in
 * @last_min_offset:	minimum fixup offset in @last_obj
 *
 * Return:		%true if a fixup in buffer @buffer at offset @offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_buffer *b,
				  binder_size_t *objects_start,
				  struct binder_buffer_object *buffer,
				  binder_size_t fixup_offset,
				  struct binder_buffer_object *last_obj,
				  binder_size_t last_min_offset)
{
	if (!last_obj) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj != buffer) {
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
		last_obj = (struct binder_buffer_object *)
			(b->data + *(objects_start + last_obj->parent));
	}
	return (fixup_offset >= last_min_offset);
}
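
/*
 * Illustrative sketch: the "allowed" layout from the comment above as it
 * would sit in a transaction's offset array. Each letter is a
 * BINDER_TYPE_PTR object; "parent" is an index back into the offset array
 * and "parent_offset" is the byte offset of the pointer being fixed up.
 * The concrete values are made up for illustration:
 *
 *	struct binder_buffer_object objs[5];
 *
 *	objs[0] = A;	// no BINDER_BUFFER_FLAG_HAS_PARENT
 *	objs[1] = B;	// .parent = 0, .parent_offset = 0
 *	objs[2] = C;	// .parent = 0, .parent_offset = 16
 *	objs[3] = D;	// .parent = 2, .parent_offset = 0
 *	objs[4] = E;	// .parent = 0, .parent_offset = 32
 *
 * When E is validated, last_obj is D; the loop walks D -> C and reaches
 * E's parent A with last_min_offset == 16 + sizeof(uintptr_t), so E's
 * fixup at offset 32 passes the increasing-offset rule.
 */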

static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_start, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start = (binder_size_t *)(buffer->data +
				      ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)off_start + buffer->offsets_size;
	for (offp = off_start; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(buffer, *offp);

		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		hdr = (struct binder_object_header *)(buffer->data + *offp);
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);

			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->fd);
			if (failed_at)
				task_close_fd(proc, fp->fd);
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			uintptr_t parent_buffer;
			u32 *fd_array;
			size_t fd_index;
			binder_size_t fd_buf_size;

			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(buffer, fda->parent,
						     off_start,
						     offp - off_start);
			if (!parent) {
				pr_err("transaction release %d bad parent offset\n",
				       debug_id);
				continue;
			}
			/*
			 * Since the parent was already fixed up, convert it
			 * back to kernel address space to access it
			 */
			parent_buffer = parent->buffer -
				binder_alloc_get_user_buffer_offset(
						&proc->alloc);

			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			fd_array = (u32 *)(parent_buffer + fda->parent_offset);
			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
				task_close_fd(proc, fd_array[fd_index]);
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}

static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp->binder, fp->cookie);
		if (!node)
			return -ENOMEM;

		node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
		node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}
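
/*
 * Illustrative sketch (userspace view, simplified): a sender embeds a
 * flat_binder_object referring to one of its own nodes; after the
 * translation above, the receiver sees the same object rewritten as a
 * handle in its own reference table. my_object_ptr and my_cookie are
 * hypothetical placeholder names:
 *
 *	struct flat_binder_object obj = {
 *		.hdr.type = BINDER_TYPE_BINDER,	// sender's local node
 *		.binder	  = (binder_uintptr_t)my_object_ptr,
 *		.cookie	  = (binder_uintptr_t)my_cookie,
 *	};
 *
 *	// ...after delivery, in the receiving process:
 *	//	obj.hdr.type == BINDER_TYPE_HANDLE
 *	//	obj.handle   == descriptor valid only in the receiver
 *	//	obj.binder   == 0, obj.cookie == 0
 */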

static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		binder_inc_node(node,
				fp->hdr.type == BINDER_TYPE_BINDER,
				0, NULL);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
	} else {
		struct binder_ref_data dest_rdata;

		/*
		 * Note: use the outer ret here; a local "int ret" shadowing
		 * it would silently swallow an inc_ref failure and report
		 * success to the caller.
		 */
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}

static int binder_translate_fd(int fd,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	int target_fd;
	struct file *file;
	int ret;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
	if (target_fd < 0) {
		ret = -ENOMEM;
		goto err_get_unused_fd;
	}
	task_fd_install(target_proc, target_fd, file);
	trace_binder_transaction_fd(t, fd, target_fd);
	binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
		     fd, target_fd);

	return target_fd;

err_get_unused_fd:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}
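
/*
 * Illustrative sketch (userspace view): passing a file descriptor through
 * a transaction. The sender embeds a binder_fd_object; the code above
 * installs the underlying struct file into the target's fd table and
 * rewrites fp->fd, so the receiver reads a descriptor valid in *its*
 * process. pipefd[] is a hypothetical example fd:
 *
 *	struct binder_fd_object fdo = {
 *		.hdr.type	= BINDER_TYPE_FD,
 *		.pad_flags	= 0,
 *		.fd		= pipefd[0],	// valid in the sender
 *	};
 *
 *	// ...on the receiving side, fdo.fd is a *different* number that
 *	// refers to the same open file description, opened O_CLOEXEC.
 */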

static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size, num_installed_fds;
	int target_fd;
	uintptr_t parent_buffer;
	u32 *fd_array;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * Since the parent was already fixed up, convert it
	 * back to the kernel address space to access it
	 */
	parent_buffer = parent->buffer -
		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
	fd_array = (u32 *)(parent_buffer + fda->parent_offset);
	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
						in_reply_to);
		if (target_fd < 0)
			goto err_translate_fd_failed;
		fd_array[fdi] = target_fd;
	}
	return 0;

err_translate_fd_failed:
	/*
	 * Failed to allocate fd or security error, free fds
	 * installed so far.
	 */
	num_installed_fds = fdi;
	for (fdi = 0; fdi < num_installed_fds; fdi++)
		task_close_fd(target_proc, fd_array[fdi]);
	return target_fd;
}

static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t *off_start,
			       binder_size_t num_valid,
			       struct binder_buffer_object *last_fixup_obj,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	u8 *parent_buffer;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(b, off_start,
				   parent, bp->parent_offset,
				   last_fixup_obj,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	parent_buffer = (u8 *)(parent->buffer -
			binder_alloc_get_user_buffer_offset(
				&target_proc->alloc));
	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;

	return 0;
}
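
/*
 * Worked example (illustrative): suppose a parent buffer was copied to the
 * target and now lives at user address 0x7000 there, its child buffer at
 * 0x7400, and the child declares parent_offset == 16. The code above
 * computes the parent's kernel mapping and patches the pointer in place:
 *
 *	parent_buffer = 0x7000 - user_buffer_offset;	// kernel view
 *	*(binder_uintptr_t *)(parent_buffer + 16) = 0x7400;
 *
 * so the receiver, reading its copy of the parent at 0x7000, finds a
 * pointer that is valid in its own address space. All addresses above are
 * made up for illustration.
 */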

static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end, *off_start;
	binder_size_t off_min;
	u8 *sg_bufp, *sg_buf_end;
	struct binder_proc *target_proc = NULL;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct list_head *target_list;
	wait_queue_head_t *target_wait;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error = 0;
	uint32_t return_error_param = 0;
	uint32_t return_error_line = 0;
	struct binder_buffer_object *last_fixup_obj = NULL;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	int t_debug_id = atomic_inc_return(&binder_last_id);

	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	e->context_name = proc->context->name;

	if (reply) {
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_empty_call_stack;
		}
		binder_set_nice(in_reply_to->saved_priority);
		if (in_reply_to->to_thread != thread) {
			spin_lock(&in_reply_to->lock);
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			spin_unlock(&in_reply_to->lock);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		target_thread = binder_get_txn_from(in_reply_to);
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
		target_proc->tmp_ref++;
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
			ref = binder_get_ref(proc, tr->target.handle, true);
			if (ref) {
				binder_inc_node(ref->node, 1, 0, NULL);
				target_node = ref->node;
			}
			if (target_node == NULL) {
				binder_user_error("%d:%d got transaction to invalid handle\n",
					proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_invalid_target_handle;
			}
		} else {
			mutex_lock(&context->context_mgr_node_lock);
			target_node = context->binder_context_mgr_node;
			if (target_node == NULL) {
				return_error = BR_DEAD_REPLY;
				mutex_unlock(&context->context_mgr_node_lock);
				return_error_line = __LINE__;
				goto err_no_context_mgr_node;
			}
			binder_inc_node(target_node, 1, 0, NULL);
			mutex_unlock(&context->context_mgr_node_lock);
		}
		e->to_node = target_node->debug_id;
		target_proc = target_node->proc;
		if (target_proc == NULL) {
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		target_proc->tmp_ref++;
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPERM;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				spin_lock(&tmp->lock);
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				spin_unlock(&tmp->lock);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EPROTO;
				return_error_line = __LINE__;
				goto err_bad_call_stack;
			}
			while (tmp) {
				struct binder_thread *from;

				spin_lock(&tmp->lock);
				from = tmp->from;
				if (from && from->proc == target_proc) {
					atomic_inc(&from->tmp_ref);
					target_thread = from;
					spin_unlock(&tmp->lock);
					break;
				}
				spin_unlock(&tmp->lock);
				tmp = tmp->from_parent;
			}
		}
	}
	if (target_thread) {
		e->to_thread = target_thread->pid;
		target_list = &target_thread->todo;
		target_wait = &target_thread->wait;
	} else {
		target_list = &target_proc->todo;
		target_wait = &target_proc->wait;
	}
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);
	spin_lock_init(&t->lock);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	t->priority = task_nice(current);

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
	if (IS_ERR(t->buffer)) {
		/*
		 * -ESRCH indicates VMA cleared. The target is dying.
		 */
		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	off_start = (binder_size_t *)(t->buffer->data +
				      ALIGN(tr->data_size, sizeof(void *)));
	offp = off_start;

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  (u64)extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	off_end = (void *)off_start + tr->offsets_size;
	sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
	sg_buf_end = sg_bufp + extra_buffers_size;
	off_min = 0;
	for (; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(t->buffer, *offp);

		if (object_size == 0 || *offp < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid, (u64)*offp,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
		off_min = *offp + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			int target_fd = binder_translate_fd(fp->fd, t, thread,
							    in_reply_to);

			if (target_fd < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = target_fd;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			fp->pad_binder = 0;
			fp->fd = target_fd;
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			struct binder_buffer_object *parent =
				binder_validate_ptr(t->buffer, fda->parent,
						    off_start,
						    offp - off_start);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(t->buffer, off_start,
						   parent, fda->parent_offset,
						   last_fixup_obj,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(fda, parent, t, thread,
							in_reply_to);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = parent;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end - sg_bufp;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			if (copy_from_user(sg_bufp,
					   (const void __user *)(uintptr_t)
					   bp->buffer, bp->length)) {
				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
						  proc->pid, thread->pid);
				return_error_param = -EFAULT;
				return_error = BR_FAILED_REPLY;
				return_error_line = __LINE__;
				goto err_copy_data_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)sg_bufp +
				binder_alloc_get_user_buffer_offset(
						&target_proc->alloc);
			sg_bufp += ALIGN(bp->length, sizeof(u64));

			ret = binder_fixup_parent(t, thread, bp, off_start,
						  offp - off_start,
						  last_fixup_obj,
						  last_fixup_min_off);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = bp;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_object_type;
		}
	}
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	binder_enqueue_work(proc, tcomplete, &thread->todo);

	if (reply) {
		if (target_thread->is_dead)
			goto err_dead_proc_or_thread;
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction(target_thread, in_reply_to);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
		if (target_proc->is_dead ||
				(target_thread && target_thread->is_dead)) {
			binder_pop_transaction(thread, t);
			goto err_dead_proc_or_thread;
		}
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
		if (target_node->has_async_transaction) {
			target_list = &target_node->async_todo;
			target_wait = NULL;
		} else
			target_node->has_async_transaction = 1;
		if (target_proc->is_dead ||
				(target_thread && target_thread->is_dead))
			goto err_dead_proc_or_thread;
	}
	t->work.type = BINDER_WORK_TRANSACTION;
	binder_enqueue_work(target_proc, &t->work, target_list);
	if (target_wait) {
		if (reply || !(tr->flags & TF_ONE_WAY))
			wake_up_interruptible_sync(target_wait);
		else
			wake_up_interruptible(target_wait);
	}
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	binder_proc_dec_tmpref(target_proc);
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
	return;

err_dead_proc_or_thread:
	return_error = BR_DEAD_REPLY;
	return_error_line = __LINE__;
err_translate_failed:
err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
	trace_binder_transaction_failed_buffer_release(t->buffer);
	binder_transaction_buffer_release(target_proc, t->buffer, offp);
	target_node = NULL;
	t->buffer->transaction = NULL;
	binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
	kfree(tcomplete);
	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
	if (target_thread)
		binder_thread_dec_tmpref(target_thread);
	if (target_proc)
		binder_proc_dec_tmpref(target_proc);
	if (target_node)
		binder_dec_node(target_node, 1, 0);

	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
		     "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
		     proc->pid, thread->pid, return_error, return_error_param,
		     (u64)tr->data_size, (u64)tr->offsets_size,
		     return_error_line);

	{
		struct binder_transaction_log_entry *fe;

		e->return_error = return_error;
		e->return_error_param = return_error_param;
		e->return_error_line = return_error_line;
		fe = binder_transaction_log_add(&binder_transaction_log_failed);
		*fe = *e;
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
		smp_wmb();
		WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	}

	BUG_ON(thread->return_error.cmd != BR_OK);
	if (in_reply_to) {
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		binder_enqueue_work(thread->proc,
				    &thread->return_error.work,
				    &thread->todo);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
		thread->return_error.cmd = return_error;
		binder_enqueue_work(thread->proc,
				    &thread->return_error.work,
				    &thread->todo);
	}
}

static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error.cmd == BR_OK) {
		int ret;

		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			const char *debug_string;
			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
			struct binder_ref_data rdata;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(uint32_t);
			ret = -1;
			if (increment && !target) {
				struct binder_node *ctx_mgr_node;

				mutex_lock(&context->context_mgr_node_lock);
				ctx_mgr_node = context->binder_context_mgr_node;
				if (ctx_mgr_node)
					ret = binder_inc_ref_for_node(
							proc, ctx_mgr_node,
							strong, NULL, &rdata);
				mutex_unlock(&context->context_mgr_node_lock);
			}
			if (ret)
				ret = binder_update_ref_for_handle(
						proc, target, increment, strong,
						&rdata);
			if (!ret && rdata.desc != target) {
				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
					proc->pid, thread->pid,
					target, rdata.desc);
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				break;
			case BC_RELEASE:
				debug_string = "Release";
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				break;
			}
			if (ret) {
				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
					proc->pid, thread->pid, debug_string,
					strong, target, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d\n",
				     proc->pid, thread->pid, debug_string,
				     rdata.debug_id, rdata.desc, rdata.strong,
				     rdata.weak);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" :
					"BC_ACQUIRE_DONE",
					(u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					(u64)node_ptr, node->debug_id,
					(u64)cookie, (u64)node->cookie);
				binder_put_node(node);
				break;
			}
			binder_inner_proc_lock(proc);
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_inner_proc_unlock(proc);
					binder_put_node(node);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_inner_proc_unlock(proc);
					binder_put_node(node);
					break;
				}
				node->pending_weak_ref = 0;
			}
			binder_inner_proc_unlock(proc);
			binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d tr %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs,
				     node->local_weak_refs, node->tmp_refs);
			binder_put_node(node);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_alloc_prepare_to_free(&proc->alloc,
							      data_ptr);
			if (buffer == NULL) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			if (!buffer->allow_user_free) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
					proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");

			if (buffer->transaction) {
				buffer->transaction->buffer = NULL;
				buffer->transaction = NULL;
			}
			if (buffer->async_transaction && buffer->target_node) {
				struct binder_node *buf_node;
				struct binder_work *w;

				buf_node = buffer->target_node;
				BUG_ON(!buf_node->has_async_transaction);
				BUG_ON(buf_node->proc != proc);
				binder_inner_proc_lock(proc);
				w = binder_dequeue_work_head_ilocked(
						&buf_node->async_todo);
				if (!w)
					buf_node->has_async_transaction = 0;
				else
					binder_enqueue_work_ilocked(
							w, &thread->todo);
				binder_inner_proc_unlock(proc);
			}
			trace_binder_transaction_buffer_release(buffer);
			binder_transaction_buffer_release(proc, buffer, NULL);
			binder_alloc_free_buf(&proc->alloc, buffer);
			break;
		}

		case BC_TRANSACTION_SG:
		case BC_REPLY_SG: {
			struct binder_transaction_data_sg tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr.transaction_data,
					   cmd == BC_REPLY_SG, tr.buffers_size);
			break;
		}
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}

		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			ref = binder_get_ref(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->data.debug_id,
				     ref->data.desc, ref->data.strong,
				     ref->data.weak, ref->node->debug_id);

			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
						proc->pid, thread->pid);
					break;
				}
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					WARN_ON(thread->return_error.cmd !=
						BR_OK);
					thread->return_error.cmd = BR_ERROR;
					binder_enqueue_work(
							thread->proc,
							&thread->return_error.work,
							&thread->todo);
					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
						     "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						     proc->pid, thread->pid);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_work(
								proc,
								&ref->death->work,
								&thread->todo);
					else {
						binder_enqueue_work(
								proc,
								&ref->death->work,
								&proc->todo);
						wake_up_interruptible(
								&proc->wait);
					}
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
						proc->pid, thread->pid);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
						proc->pid, thread->pid,
						(u64)death->cookie,
						(u64)cookie);
					break;
				}
				ref->death = NULL;
				binder_inner_proc_lock(proc);
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_work_ilocked(
								&death->work,
								&thread->todo);
					else {
						binder_enqueue_work_ilocked(
								&death->work,
								&proc->todo);
						wake_up_interruptible(
								&proc->wait);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
				binder_inner_proc_unlock(proc);
			}
		} break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			binder_inner_proc_lock(proc);
			list_for_each_entry(w, &proc->delivered_death,
					    entry) {
				struct binder_ref_death *tmp_death =
					container_of(w,
						     struct binder_ref_death,
						     work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
					proc->pid, thread->pid, (u64)cookie);
				binder_inner_proc_unlock(proc);
				break;
			}
			binder_dequeue_work_ilocked(&death->work);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper &
				    (BINDER_LOOPER_STATE_REGISTERED |
				     BINDER_LOOPER_STATE_ENTERED))
					binder_enqueue_work_ilocked(
							&death->work, &thread->todo);
				else {
					binder_enqueue_work_ilocked(
							&death->work,
							&proc->todo);
					wake_up_interruptible(&proc->wait);
				}
			}
			binder_inner_proc_unlock(proc);
		} break;

		default:
			pr_err("%d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}

static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
	}
}

static int binder_has_proc_work(struct binder_proc *proc,
				struct binder_thread *thread)
{
	return !binder_worklist_empty(proc, &proc->todo) ||
		thread->looper_need_return;
}

static int binder_has_thread_work(struct binder_thread *thread)
{
	return !binder_worklist_empty(thread->proc, &thread->todo) ||
		thread->looper_need_return;
}

static int binder_put_node_cmd(struct binder_proc *proc,
			       struct binder_thread *thread,
			       void __user **ptrp,
			       binder_uintptr_t node_ptr,
			       binder_uintptr_t node_cookie,
			       int node_debug_id,
			       uint32_t cmd, const char *cmd_name)
{
	void __user *ptr = *ptrp;

	if (put_user(cmd, (uint32_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(uint32_t);

	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	binder_stat_br(proc, thread, cmd);
	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
		     proc->pid, thread->pid, cmd_name, node_debug_id,
		     (u64)node_ptr, (u64)node_cookie);

	*ptrp = ptr;
	return 0;
}
3323
355b0502
GKH
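/*
 * Fill the userspace read buffer with BR_* commands, blocking (unless
 * non_block) until work is available on either the thread's own todo
 * list or, for an idle looper thread, the proc-wide todo list.
 */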
3324static int binder_thread_read(struct binder_proc *proc,
3325 struct binder_thread *thread,
da49889d
AH
3326 binder_uintptr_t binder_buffer, size_t size,
3327 binder_size_t *consumed, int non_block)
355b0502 3328{
da49889d 3329 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
355b0502
GKH
3330 void __user *ptr = buffer + *consumed;
3331 void __user *end = buffer + size;
3332
3333 int ret = 0;
3334 int wait_for_proc_work;
3335
3336 if (*consumed == 0) {
3337 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3338 return -EFAULT;
3339 ptr += sizeof(uint32_t);
3340 }
3341
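/*
 * Only a thread with no transaction stack and an empty thread-local
 * todo list may wait for proc-wide work; a thread that is
 * mid-transaction must wait on its own todo list for the reply.
 */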
3342retry:
3343 wait_for_proc_work = thread->transaction_stack == NULL &&
72196393 3344 binder_worklist_empty(proc, &thread->todo);
355b0502 3345
355b0502
GKH
3346 thread->looper |= BINDER_LOOPER_STATE_WAITING;
3347 if (wait_for_proc_work)
3348 proc->ready_threads++;
975a1ac9
AH
3349
3350 binder_unlock(__func__);
3351
3352 trace_binder_wait_for_work(wait_for_proc_work,
3353 !!thread->transaction_stack,
72196393 3354 !binder_worklist_empty(proc, &thread->todo));
355b0502
GKH
3355 if (wait_for_proc_work) {
3356 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3357 BINDER_LOOPER_STATE_ENTERED))) {
56b468fc 3358 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
355b0502
GKH
3359 proc->pid, thread->pid, thread->looper);
3360 wait_event_interruptible(binder_user_error_wait,
3361 binder_stop_on_user_error < 2);
3362 }
3363 binder_set_nice(proc->default_priority);
3364 if (non_block) {
3365 if (!binder_has_proc_work(proc, thread))
3366 ret = -EAGAIN;
3367 } else
e2610b26 3368 ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
355b0502
GKH
3369 } else {
3370 if (non_block) {
3371 if (!binder_has_thread_work(thread))
3372 ret = -EAGAIN;
3373 } else
e2610b26 3374 ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
355b0502 3375 }
975a1ac9
AH
3376
3377 binder_lock(__func__);
3378
355b0502
GKH
3379 if (wait_for_proc_work)
3380 proc->ready_threads--;
3381 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3382
3383 if (ret)
3384 return ret;
3385
3386 while (1) {
3387 uint32_t cmd;
3388 struct binder_transaction_data tr;
72196393
TK
3389 struct binder_work *w = NULL;
3390 struct list_head *list = NULL;
355b0502 3391 struct binder_transaction *t = NULL;
7a4408c6 3392 struct binder_thread *t_from;
355b0502 3393
ed29721e 3394 binder_inner_proc_lock(proc);
72196393
TK
3395 if (!binder_worklist_empty_ilocked(&thread->todo))
3396 list = &thread->todo;
3397 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
3398 wait_for_proc_work)
3399 list = &proc->todo;
3400 else {
3401 binder_inner_proc_unlock(proc);
3402
395262a9 3403 /* no data added */
08dabcee 3404 if (ptr - buffer == 4 && !thread->looper_need_return)
355b0502
GKH
3405 goto retry;
3406 break;
3407 }
3408
ed29721e
TK
3409 if (end - ptr < sizeof(tr) + 4) {
3410 binder_inner_proc_unlock(proc);
355b0502 3411 break;
ed29721e 3412 }
72196393 3413 w = binder_dequeue_work_head_ilocked(list);
355b0502
GKH
3414
3415 switch (w->type) {
3416 case BINDER_WORK_TRANSACTION: {
ed29721e 3417 binder_inner_proc_unlock(proc);
355b0502
GKH
3418 t = container_of(w, struct binder_transaction, work);
3419 } break;
26549d17
TK
3420 case BINDER_WORK_RETURN_ERROR: {
3421 struct binder_error *e = container_of(
3422 w, struct binder_error, work);
3423
3424 WARN_ON(e->cmd == BR_OK);
ed29721e 3425 binder_inner_proc_unlock(proc);
26549d17
TK
3426 if (put_user(e->cmd, (uint32_t __user *)ptr))
3427 return -EFAULT;
 cmd = e->cmd; /* save before reset so binder_stat_br() below sees it */
3428 e->cmd = BR_OK;
3429 ptr += sizeof(uint32_t);
3430
3431 binder_stat_br(proc, thread, cmd);
26549d17 3432 } break;
355b0502 3433 case BINDER_WORK_TRANSACTION_COMPLETE: {
ed29721e 3434 binder_inner_proc_unlock(proc);
355b0502
GKH
3435 cmd = BR_TRANSACTION_COMPLETE;
3436 if (put_user(cmd, (uint32_t __user *)ptr))
3437 return -EFAULT;
3438 ptr += sizeof(uint32_t);
3439
3440 binder_stat_br(proc, thread, cmd);
3441 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
56b468fc 3442 "%d:%d BR_TRANSACTION_COMPLETE\n",
355b0502 3443 proc->pid, thread->pid);
355b0502
GKH
3444 kfree(w);
3445 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3446 } break;
3447 case BINDER_WORK_NODE: {
3448 struct binder_node *node = container_of(w, struct binder_node, work);
26b47d8a
TK
3449 int strong, weak;
3450 binder_uintptr_t node_ptr = node->ptr;
3451 binder_uintptr_t node_cookie = node->cookie;
3452 int node_debug_id = node->debug_id;
3453 int has_weak_ref;
3454 int has_strong_ref;
3455 void __user *orig_ptr = ptr;
3456
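/*
 * Compute the desired strong/weak state and update the node's
 * bookkeeping under the inner lock (freeing the node when no refs
 * remain), then drop the lock and emit the BR_* commands from the
 * saved ptr/cookie copies, as the node itself may already be gone.
 */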
3457 BUG_ON(proc != node->proc);
3458 strong = node->internal_strong_refs ||
3459 node->local_strong_refs;
3460 weak = !hlist_empty(&node->refs) ||
adc18842
TK
3461 node->local_weak_refs ||
3462 node->tmp_refs || strong;
26b47d8a
TK
3463 has_strong_ref = node->has_strong_ref;
3464 has_weak_ref = node->has_weak_ref;
3465
3466 if (weak && !has_weak_ref) {
355b0502
GKH
3467 node->has_weak_ref = 1;
3468 node->pending_weak_ref = 1;
3469 node->local_weak_refs++;
26b47d8a
TK
3470 }
3471 if (strong && !has_strong_ref) {
355b0502
GKH
3472 node->has_strong_ref = 1;
3473 node->pending_strong_ref = 1;
3474 node->local_strong_refs++;
26b47d8a
TK
3475 }
3476 if (!strong && has_strong_ref)
355b0502 3477 node->has_strong_ref = 0;
26b47d8a 3478 if (!weak && has_weak_ref)
355b0502 3479 node->has_weak_ref = 0;
26b47d8a
TK
3480 if (!weak && !strong) {
3481 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3482 "%d:%d node %d u%016llx c%016llx deleted\n",
3483 proc->pid, thread->pid,
3484 node_debug_id,
3485 (u64)node_ptr,
3486 (u64)node_cookie);
3487 rb_erase(&node->rb_node, &proc->nodes);
ed29721e
TK
3488 binder_inner_proc_unlock(proc);
3489 binder_free_node(node);
3490 } else
3491 binder_inner_proc_unlock(proc);
3492
26b47d8a
TK
3493 if (weak && !has_weak_ref)
3494 ret = binder_put_node_cmd(
3495 proc, thread, &ptr, node_ptr,
3496 node_cookie, node_debug_id,
3497 BR_INCREFS, "BR_INCREFS");
3498 if (!ret && strong && !has_strong_ref)
3499 ret = binder_put_node_cmd(
3500 proc, thread, &ptr, node_ptr,
3501 node_cookie, node_debug_id,
3502 BR_ACQUIRE, "BR_ACQUIRE");
3503 if (!ret && !strong && has_strong_ref)
3504 ret = binder_put_node_cmd(
3505 proc, thread, &ptr, node_ptr,
3506 node_cookie, node_debug_id,
3507 BR_RELEASE, "BR_RELEASE");
3508 if (!ret && !weak && has_weak_ref)
3509 ret = binder_put_node_cmd(
3510 proc, thread, &ptr, node_ptr,
3511 node_cookie, node_debug_id,
3512 BR_DECREFS, "BR_DECREFS");
3513 if (orig_ptr == ptr)
3514 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3515 "%d:%d node %d u%016llx c%016llx state unchanged\n",
3516 proc->pid, thread->pid,
3517 node_debug_id,
3518 (u64)node_ptr,
3519 (u64)node_cookie);
3520 if (ret)
3521 return ret;
355b0502
GKH
3522 } break;
3523 case BINDER_WORK_DEAD_BINDER:
3524 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3525 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
3526 struct binder_ref_death *death;
3527 uint32_t cmd;
3528
3529 death = container_of(w, struct binder_ref_death, work);
3530 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
3531 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
3532 else
3533 cmd = BR_DEAD_BINDER;
ed29721e
TK
3534 /*
3535 * TODO: there is a race condition between
3536 * death notification requests and delivery
3537 * of the notifications. This will be handled
3538 * in a later patch.
3539 */
3540 binder_inner_proc_unlock(proc);
355b0502
GKH
3541 if (put_user(cmd, (uint32_t __user *)ptr))
3542 return -EFAULT;
3543 ptr += sizeof(uint32_t);
da49889d
AH
3544 if (put_user(death->cookie,
3545 (binder_uintptr_t __user *)ptr))
355b0502 3546 return -EFAULT;
da49889d 3547 ptr += sizeof(binder_uintptr_t);
89334ab4 3548 binder_stat_br(proc, thread, cmd);
355b0502 3549 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
da49889d 3550 "%d:%d %s %016llx\n",
355b0502
GKH
3551 proc->pid, thread->pid,
3552 cmd == BR_DEAD_BINDER ?
3553 "BR_DEAD_BINDER" :
3554 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
da49889d 3555 (u64)death->cookie);
355b0502
GKH
3556
3557 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
355b0502
GKH
3558 kfree(death);
3559 binder_stats_deleted(BINDER_STAT_DEATH);
ed29721e
TK
3560 } else {
3561 binder_inner_proc_lock(proc);
72196393
TK
3562 binder_enqueue_work_ilocked(
3563 w, &proc->delivered_death);
ed29721e
TK
3564 binder_inner_proc_unlock(proc);
3565 }
355b0502
GKH
3566 if (cmd == BR_DEAD_BINDER)
3567 goto done; /* DEAD_BINDER notifications can cause transactions */
3568 } break;
3569 }
3570
3571 if (!t)
3572 continue;
3573
3574 BUG_ON(t->buffer == NULL);
3575 if (t->buffer->target_node) {
3576 struct binder_node *target_node = t->buffer->target_node;
10f62861 3577
355b0502
GKH
3578 tr.target.ptr = target_node->ptr;
3579 tr.cookie = target_node->cookie;
3580 t->saved_priority = task_nice(current);
3581 if (t->priority < target_node->min_priority &&
3582 !(t->flags & TF_ONE_WAY))
3583 binder_set_nice(t->priority);
3584 else if (!(t->flags & TF_ONE_WAY) ||
3585 t->saved_priority > target_node->min_priority)
3586 binder_set_nice(target_node->min_priority);
3587 cmd = BR_TRANSACTION;
3588 } else {
da49889d
AH
3589 tr.target.ptr = 0;
3590 tr.cookie = 0;
355b0502
GKH
3591 cmd = BR_REPLY;
3592 }
3593 tr.code = t->code;
3594 tr.flags = t->flags;
4a2ebb93 3595 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
355b0502 3596
7a4408c6
TK
3597 t_from = binder_get_txn_from(t);
3598 if (t_from) {
3599 struct task_struct *sender = t_from->proc->tsk;
10f62861 3600
355b0502 3601 tr.sender_pid = task_tgid_nr_ns(sender,
17cf22c3 3602 task_active_pid_ns(current));
355b0502
GKH
3603 } else {
3604 tr.sender_pid = 0;
3605 }
3606
3607 tr.data_size = t->buffer->data_size;
3608 tr.offsets_size = t->buffer->offsets_size;
19c98724
TK
3609 tr.data.ptr.buffer = (binder_uintptr_t)
3610 ((uintptr_t)t->buffer->data +
3611 binder_alloc_get_user_buffer_offset(&proc->alloc));
355b0502
GKH
3612 tr.data.ptr.offsets = tr.data.ptr.buffer +
3613 ALIGN(t->buffer->data_size,
3614 sizeof(void *));
3615
7a4408c6
TK
3616 if (put_user(cmd, (uint32_t __user *)ptr)) {
3617 if (t_from)
3618 binder_thread_dec_tmpref(t_from);
355b0502 3619 return -EFAULT;
7a4408c6 3620 }
355b0502 3621 ptr += sizeof(uint32_t);
7a4408c6
TK
3622 if (copy_to_user(ptr, &tr, sizeof(tr))) {
3623 if (t_from)
3624 binder_thread_dec_tmpref(t_from);
355b0502 3625 return -EFAULT;
7a4408c6 3626 }
355b0502
GKH
3627 ptr += sizeof(tr);
3628
975a1ac9 3629 trace_binder_transaction_received(t);
355b0502
GKH
3630 binder_stat_br(proc, thread, cmd);
3631 binder_debug(BINDER_DEBUG_TRANSACTION,
da49889d 3632 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
355b0502
GKH
3633 proc->pid, thread->pid,
3634 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
3635 "BR_REPLY",
7a4408c6
TK
3636 t->debug_id, t_from ? t_from->proc->pid : 0,
3637 t_from ? t_from->pid : 0, cmd,
355b0502 3638 t->buffer->data_size, t->buffer->offsets_size,
da49889d 3639 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
355b0502 3640
7a4408c6
TK
3641 if (t_from)
3642 binder_thread_dec_tmpref(t_from);
355b0502
GKH
3643 t->buffer->allow_user_free = 1;
3644 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
3645 t->to_parent = thread->transaction_stack;
3646 t->to_thread = thread;
3647 thread->transaction_stack = t;
3648 } else {
b6d282ce 3649 binder_free_transaction(t);
355b0502
GKH
3650 }
3651 break;
3652 }
3653
3654done:
3655
3656 *consumed = ptr - buffer;
3657 if (proc->requested_threads + proc->ready_threads == 0 &&
3658 proc->requested_threads_started < proc->max_threads &&
3659 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3660 BINDER_LOOPER_STATE_ENTERED))
3661 /* user-space fails to spawn a new thread if we leave this out */) {
3662 proc->requested_threads++;
3663 binder_debug(BINDER_DEBUG_THREADS,
56b468fc 3664 "%d:%d BR_SPAWN_LOOPER\n",
355b0502
GKH
3665 proc->pid, thread->pid);
3666 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
3667 return -EFAULT;
89334ab4 3668 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
355b0502
GKH
3669 }
3670 return 0;
3671}
3672
72196393
TK
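/*
 * Drain the work list of a dying process or thread: transactions that
 * still expect a reply get BR_DEAD_REPLY, everything else undelivered
 * is logged and freed.
 */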
3673static void binder_release_work(struct binder_proc *proc,
3674 struct list_head *list)
355b0502
GKH
3675{
3676 struct binder_work *w;
10f62861 3677
72196393
TK
3678 while (1) {
3679 w = binder_dequeue_work_head(proc, list);
3680 if (!w)
3681 return;
3682
355b0502
GKH
3683 switch (w->type) {
3684 case BINDER_WORK_TRANSACTION: {
3685 struct binder_transaction *t;
3686
3687 t = container_of(w, struct binder_transaction, work);
675d66b0
AH
3688 if (t->buffer->target_node &&
3689 !(t->flags & TF_ONE_WAY)) {
355b0502 3690 binder_send_failed_reply(t, BR_DEAD_REPLY);
675d66b0
AH
3691 } else {
3692 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
56b468fc 3693 "undelivered transaction %d\n",
675d66b0 3694 t->debug_id);
b6d282ce 3695 binder_free_transaction(t);
675d66b0 3696 }
355b0502 3697 } break;
26549d17
TK
3698 case BINDER_WORK_RETURN_ERROR: {
3699 struct binder_error *e = container_of(
3700 w, struct binder_error, work);
3701
3702 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
3703 "undelivered TRANSACTION_ERROR: %u\n",
3704 e->cmd);
3705 } break;
355b0502 3706 case BINDER_WORK_TRANSACTION_COMPLETE: {
675d66b0 3707 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
56b468fc 3708 "undelivered TRANSACTION_COMPLETE\n");
355b0502
GKH
3709 kfree(w);
3710 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3711 } break;
675d66b0
AH
3712 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3713 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
3714 struct binder_ref_death *death;
3715
3716 death = container_of(w, struct binder_ref_death, work);
3717 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
da49889d
AH
3718 "undelivered death notification, %016llx\n",
3719 (u64)death->cookie);
675d66b0
AH
3720 kfree(death);
3721 binder_stats_deleted(BINDER_STAT_DEATH);
3722 } break;
355b0502 3723 default:
56b468fc 3724 pr_err("unexpected work type, %d, not freed\n",
675d66b0 3725 w->type);
355b0502
GKH
3726 break;
3727 }
3728 }
3729
3730}
3731
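/*
 * Find the binder_thread for current in proc->threads (an rbtree keyed
 * by pid), allocating and inserting a fresh entry on first use.
 */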
3732static struct binder_thread *binder_get_thread(struct binder_proc *proc)
3733{
3734 struct binder_thread *thread = NULL;
3735 struct rb_node *parent = NULL;
3736 struct rb_node **p = &proc->threads.rb_node;
3737
3738 while (*p) {
3739 parent = *p;
3740 thread = rb_entry(parent, struct binder_thread, rb_node);
3741
3742 if (current->pid < thread->pid)
3743 p = &(*p)->rb_left;
3744 else if (current->pid > thread->pid)
3745 p = &(*p)->rb_right;
3746 else
3747 break;
3748 }
3749 if (*p == NULL) {
3750 thread = kzalloc(sizeof(*thread), GFP_KERNEL);
3751 if (thread == NULL)
3752 return NULL;
3753 binder_stats_created(BINDER_STAT_THREAD);
3754 thread->proc = proc;
3755 thread->pid = current->pid;
7a4408c6 3756 atomic_set(&thread->tmp_ref, 0);
355b0502
GKH
3757 init_waitqueue_head(&thread->wait);
3758 INIT_LIST_HEAD(&thread->todo);
3759 rb_link_node(&thread->rb_node, parent, p);
3760 rb_insert_color(&thread->rb_node, &proc->threads);
08dabcee 3761 thread->looper_need_return = true;
26549d17
TK
3762 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
3763 thread->return_error.cmd = BR_OK;
3764 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
3765 thread->reply_error.cmd = BR_OK;
355b0502
GKH
3766 }
3767 return thread;
3768}
3769
7a4408c6
TK
3770static void binder_free_proc(struct binder_proc *proc)
3771{
3772 BUG_ON(!list_empty(&proc->todo));
3773 BUG_ON(!list_empty(&proc->delivered_death));
3774 binder_alloc_deferred_release(&proc->alloc);
3775 put_task_struct(proc->tsk);
3776 binder_stats_deleted(BINDER_STAT_PROC);
3777 kfree(proc);
3778}
3779
3780static void binder_free_thread(struct binder_thread *thread)
3781{
3782 BUG_ON(!list_empty(&thread->todo));
3783 binder_stats_deleted(BINDER_STAT_THREAD);
3784 binder_proc_dec_tmpref(thread->proc);
3785 kfree(thread);
3786}
3787
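/*
 * Detach a dying thread from its proc: walk the transaction stack
 * severing every link to this thread, fail a transaction that still
 * expects a reply from it, and drain its todo list. Returns the number
 * of transactions that were still active.
 */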
3788static int binder_thread_release(struct binder_proc *proc,
3789 struct binder_thread *thread)
355b0502
GKH
3790{
3791 struct binder_transaction *t;
3792 struct binder_transaction *send_reply = NULL;
3793 int active_transactions = 0;
7a4408c6 3794 struct binder_transaction *last_t = NULL;
355b0502 3795
7a4408c6
TK
3796 /*
3797 * take a ref on the proc so it survives
3798 * after we remove this thread from proc->threads.
3799 * The corresponding dec is when we actually
3800 * free the thread in binder_free_thread()
3801 */
3802 proc->tmp_ref++;
3803 /*
3804 * take a ref on this thread to ensure it
3805 * survives while we are releasing it
3806 */
3807 atomic_inc(&thread->tmp_ref);
355b0502
GKH
3808 rb_erase(&thread->rb_node, &proc->threads);
3809 t = thread->transaction_stack;
7a4408c6
TK
3810 if (t) {
3811 spin_lock(&t->lock);
3812 if (t->to_thread == thread)
3813 send_reply = t;
3814 }
3815 thread->is_dead = true;
3816
355b0502 3817 while (t) {
7a4408c6 3818 last_t = t;
355b0502
GKH
3819 active_transactions++;
3820 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
56b468fc
AS
3821 "release %d:%d transaction %d %s, still active\n",
3822 proc->pid, thread->pid,
355b0502
GKH
3823 t->debug_id,
3824 (t->to_thread == thread) ? "in" : "out");
3825
3826 if (t->to_thread == thread) {
3827 t->to_proc = NULL;
3828 t->to_thread = NULL;
3829 if (t->buffer) {
3830 t->buffer->transaction = NULL;
3831 t->buffer = NULL;
3832 }
3833 t = t->to_parent;
3834 } else if (t->from == thread) {
3835 t->from = NULL;
3836 t = t->from_parent;
3837 } else
3838 BUG();
7a4408c6
TK
3839 spin_unlock(&last_t->lock);
3840 if (t)
3841 spin_lock(&t->lock);
355b0502 3842 }
7a4408c6 3843
355b0502
GKH
3844 if (send_reply)
3845 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
72196393 3846 binder_release_work(proc, &thread->todo);
7a4408c6 3847 binder_thread_dec_tmpref(thread);
355b0502
GKH
3848 return active_transactions;
3849}
3850
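/*
 * Poll either the proc-wide or the thread-local work queue, depending
 * on whether this thread is free to handle process work (no
 * transaction stack, empty thread todo list).
 */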
3851static unsigned int binder_poll(struct file *filp,
3852 struct poll_table_struct *wait)
3853{
3854 struct binder_proc *proc = filp->private_data;
3855 struct binder_thread *thread = NULL;
3856 int wait_for_proc_work;
3857
975a1ac9
AH
3858 binder_lock(__func__);
3859
355b0502
GKH
3860 thread = binder_get_thread(proc);
 if (!thread) {
 /* thread allocation failed under memory pressure */
 binder_unlock(__func__);
 return POLLERR;
 }
3861
3862 wait_for_proc_work = thread->transaction_stack == NULL &&
72196393 3863 binder_worklist_empty(proc, &thread->todo);
975a1ac9
AH
3864
3865 binder_unlock(__func__);
355b0502
GKH
3866
3867 if (wait_for_proc_work) {
3868 if (binder_has_proc_work(proc, thread))
3869 return POLLIN;
3870 poll_wait(filp, &proc->wait, wait);
3871 if (binder_has_proc_work(proc, thread))
3872 return POLLIN;
3873 } else {
3874 if (binder_has_thread_work(thread))
3875 return POLLIN;
3876 poll_wait(filp, &thread->wait, wait);
3877 if (binder_has_thread_work(thread))
3878 return POLLIN;
3879 }
3880 return 0;
3881}
3882
78260ac6
TR
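/*
 * Handle BINDER_WRITE_READ: copy the binder_write_read header in,
 * process the write buffer and then the read buffer, and copy the
 * consumed counts back to userspace even on error so it can resume.
 */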
3883static int binder_ioctl_write_read(struct file *filp,
3884 unsigned int cmd, unsigned long arg,
3885 struct binder_thread *thread)
3886{
3887 int ret = 0;
3888 struct binder_proc *proc = filp->private_data;
3889 unsigned int size = _IOC_SIZE(cmd);
3890 void __user *ubuf = (void __user *)arg;
3891 struct binder_write_read bwr;
3892
3893 if (size != sizeof(struct binder_write_read)) {
3894 ret = -EINVAL;
3895 goto out;
3896 }
3897 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
3898 ret = -EFAULT;
3899 goto out;
3900 }
3901 binder_debug(BINDER_DEBUG_READ_WRITE,
3902 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
3903 proc->pid, thread->pid,
3904 (u64)bwr.write_size, (u64)bwr.write_buffer,
3905 (u64)bwr.read_size, (u64)bwr.read_buffer);
3906
3907 if (bwr.write_size > 0) {
3908 ret = binder_thread_write(proc, thread,
3909 bwr.write_buffer,
3910 bwr.write_size,
3911 &bwr.write_consumed);
3912 trace_binder_write_done(ret);
3913 if (ret < 0) {
3914 bwr.read_consumed = 0;
3915 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
3916 ret = -EFAULT;
3917 goto out;
3918 }
3919 }
3920 if (bwr.read_size > 0) {
3921 ret = binder_thread_read(proc, thread, bwr.read_buffer,
3922 bwr.read_size,
3923 &bwr.read_consumed,
3924 filp->f_flags & O_NONBLOCK);
3925 trace_binder_read_done(ret);
72196393 3926 if (!binder_worklist_empty(proc, &proc->todo))
78260ac6
TR
3927 wake_up_interruptible(&proc->wait);
3928 if (ret < 0) {
3929 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
3930 ret = -EFAULT;
3931 goto out;
3932 }
3933 }
3934 binder_debug(BINDER_DEBUG_READ_WRITE,
3935 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
3936 proc->pid, thread->pid,
3937 (u64)bwr.write_consumed, (u64)bwr.write_size,
3938 (u64)bwr.read_consumed, (u64)bwr.read_size);
3939 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
3940 ret = -EFAULT;
3941 goto out;
3942 }
3943out:
3944 return ret;
3945}
3946
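/*
 * Install the calling process as context manager (the node behind
 * handle 0) for this binder context; fails if a manager is already
 * registered or the caller's euid does not match a previously
 * configured manager uid.
 */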
3947static int binder_ioctl_set_ctx_mgr(struct file *filp)
3948{
3949 int ret = 0;
3950 struct binder_proc *proc = filp->private_data;
342e5c90 3951 struct binder_context *context = proc->context;
c44b1231 3952 struct binder_node *new_node;
78260ac6
TR
3953 kuid_t curr_euid = current_euid();
3954
c44b1231 3955 mutex_lock(&context->context_mgr_node_lock);
342e5c90 3956 if (context->binder_context_mgr_node) {
78260ac6
TR
3957 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
3958 ret = -EBUSY;
3959 goto out;
3960 }
79af7307
SS
3961 ret = security_binder_set_context_mgr(proc->tsk);
3962 if (ret < 0)
3963 goto out;
342e5c90
MC
3964 if (uid_valid(context->binder_context_mgr_uid)) {
3965 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
78260ac6
TR
3966 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
3967 from_kuid(&init_user_ns, curr_euid),
3968 from_kuid(&init_user_ns,
342e5c90 3969 context->binder_context_mgr_uid));
78260ac6
TR
3970 ret = -EPERM;
3971 goto out;
3972 }
3973 } else {
342e5c90 3974 context->binder_context_mgr_uid = curr_euid;
78260ac6 3975 }
c44b1231
TK
3976 new_node = binder_new_node(proc, 0, 0);
3977 if (!new_node) {
78260ac6
TR
3978 ret = -ENOMEM;
3979 goto out;
3980 }
c44b1231
TK
3981 new_node->local_weak_refs++;
3982 new_node->local_strong_refs++;
3983 new_node->has_strong_ref = 1;
3984 new_node->has_weak_ref = 1;
3985 context->binder_context_mgr_node = new_node;
adc18842 3986 binder_put_node(new_node);
78260ac6 3987out:
c44b1231 3988 mutex_unlock(&context->context_mgr_node_lock);
78260ac6
TR
3989 return ret;
3990}
3991
355b0502
GKH
3992static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3993{
3994 int ret;
3995 struct binder_proc *proc = filp->private_data;
3996 struct binder_thread *thread;
3997 unsigned int size = _IOC_SIZE(cmd);
3998 void __user *ubuf = (void __user *)arg;
3999
78260ac6
TR
4000 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4001 proc->pid, current->pid, cmd, arg);*/
355b0502 4002
975a1ac9
AH
4003 trace_binder_ioctl(cmd, arg);
4004
355b0502
GKH
4005 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4006 if (ret)
975a1ac9 4007 goto err_unlocked;
355b0502 4008
975a1ac9 4009 binder_lock(__func__);
355b0502
GKH
4010 thread = binder_get_thread(proc);
4011 if (thread == NULL) {
4012 ret = -ENOMEM;
4013 goto err;
4014 }
4015
4016 switch (cmd) {
78260ac6
TR
4017 case BINDER_WRITE_READ:
4018 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4019 if (ret)
355b0502 4020 goto err;
355b0502 4021 break;
355b0502
GKH
4022 case BINDER_SET_MAX_THREADS:
4023 if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
4024 ret = -EINVAL;
4025 goto err;
4026 }
4027 break;
4028 case BINDER_SET_CONTEXT_MGR:
78260ac6
TR
4029 ret = binder_ioctl_set_ctx_mgr(filp);
4030 if (ret)
355b0502 4031 goto err;
355b0502
GKH
4032 break;
4033 case BINDER_THREAD_EXIT:
56b468fc 4034 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
355b0502 4035 proc->pid, thread->pid);
7a4408c6 4036 binder_thread_release(proc, thread);
355b0502
GKH
4037 thread = NULL;
4038 break;
36c89c0a
MM
4039 case BINDER_VERSION: {
4040 struct binder_version __user *ver = ubuf;
4041
355b0502
GKH
4042 if (size != sizeof(struct binder_version)) {
4043 ret = -EINVAL;
4044 goto err;
4045 }
36c89c0a
MM
4046 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4047 &ver->protocol_version)) {
355b0502
GKH
4048 ret = -EINVAL;
4049 goto err;
4050 }
4051 break;
36c89c0a 4052 }
355b0502
GKH
4053 default:
4054 ret = -EINVAL;
4055 goto err;
4056 }
4057 ret = 0;
4058err:
4059 if (thread)
08dabcee 4060 thread->looper_need_return = false;
975a1ac9 4061 binder_unlock(__func__);
355b0502
GKH
4062 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4063 if (ret && ret != -ERESTARTSYS)
56b468fc 4064 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
975a1ac9
AH
4065err_unlocked:
4066 trace_binder_ioctl_done(ret);
355b0502
GKH
4067 return ret;
4068}
4069
4070static void binder_vma_open(struct vm_area_struct *vma)
4071{
4072 struct binder_proc *proc = vma->vm_private_data;
10f62861 4073
355b0502 4074 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
56b468fc 4075 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
355b0502
GKH
4076 proc->pid, vma->vm_start, vma->vm_end,
4077 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4078 (unsigned long)pgprot_val(vma->vm_page_prot));
355b0502
GKH
4079}
4080
4081static void binder_vma_close(struct vm_area_struct *vma)
4082{
4083 struct binder_proc *proc = vma->vm_private_data;
10f62861 4084
355b0502 4085 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
56b468fc 4086 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
355b0502
GKH
4087 proc->pid, vma->vm_start, vma->vm_end,
4088 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4089 (unsigned long)pgprot_val(vma->vm_page_prot));
19c98724 4090 binder_alloc_vma_close(&proc->alloc);
355b0502
GKH
4091 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
4092}
4093
11bac800 4094static int binder_vm_fault(struct vm_fault *vmf)
ddac7d5f
VM
4095{
4096 return VM_FAULT_SIGBUS;
4097}
4098
7cbea8dc 4099static const struct vm_operations_struct binder_vm_ops = {
355b0502
GKH
4100 .open = binder_vma_open,
4101 .close = binder_vma_close,
ddac7d5f 4102 .fault = binder_vm_fault,
355b0502
GKH
4103};
4104
19c98724
TK
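/*
 * Map the binder buffer area: capped at 4MB, never writable from
 * userspace (VM_MAYWRITE is cleared), and handed to the per-proc
 * binder_alloc allocator, which backs it with pages on demand.
 */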
4105static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4106{
4107 int ret;
4108 struct binder_proc *proc = filp->private_data;
4109 const char *failure_string;
4110
4111 if (proc->tsk != current->group_leader)
4112 return -EINVAL;
4113
4114 if ((vma->vm_end - vma->vm_start) > SZ_4M)
4115 vma->vm_end = vma->vm_start + SZ_4M;
4116
4117 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4118 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4119 __func__, proc->pid, vma->vm_start, vma->vm_end,
4120 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4121 (unsigned long)pgprot_val(vma->vm_page_prot));
4122
4123 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4124 ret = -EPERM;
4125 failure_string = "bad vm_flags";
4126 goto err_bad_arg;
4127 }
4128 vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
4129 vma->vm_ops = &binder_vm_ops;
4130 vma->vm_private_data = proc;
4131
4132 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
4133 if (ret)
4134 return ret;
4135 proc->files = get_files_struct(current);
4136 return 0;
4137
355b0502 4138err_bad_arg:
258767fe 4139 pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
355b0502
GKH
4140 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
4141 return ret;
4142}
4143
4144static int binder_open(struct inode *nodp, struct file *filp)
4145{
4146 struct binder_proc *proc;
ac4812c5 4147 struct binder_device *binder_dev;
355b0502
GKH
4148
4149 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
4150 current->group_leader->pid, current->pid);
4151
4152 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
4153 if (proc == NULL)
4154 return -ENOMEM;
9630fe88
TK
4155 spin_lock_init(&proc->inner_lock);
4156 spin_lock_init(&proc->outer_lock);
c4ea41ba
TK
4157 get_task_struct(current->group_leader);
4158 proc->tsk = current->group_leader;
355b0502
GKH
4159 INIT_LIST_HEAD(&proc->todo);
4160 init_waitqueue_head(&proc->wait);
4161 proc->default_priority = task_nice(current);
ac4812c5
MC
4162 binder_dev = container_of(filp->private_data, struct binder_device,
4163 miscdev);
4164 proc->context = &binder_dev->context;
19c98724 4165 binder_alloc_init(&proc->alloc);
975a1ac9
AH
4166
4167 binder_lock(__func__);
4168
355b0502 4169 binder_stats_created(BINDER_STAT_PROC);
355b0502
GKH
4170 proc->pid = current->group_leader->pid;
4171 INIT_LIST_HEAD(&proc->delivered_death);
4172 filp->private_data = proc;
975a1ac9
AH
4173
4174 binder_unlock(__func__);
355b0502 4175
c44b1231
TK
4176 mutex_lock(&binder_procs_lock);
4177 hlist_add_head(&proc->proc_node, &binder_procs);
4178 mutex_unlock(&binder_procs_lock);
4179
16b66554 4180 if (binder_debugfs_dir_entry_proc) {
355b0502 4181 char strbuf[11];
10f62861 4182
355b0502 4183 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
14db3181
MC
4184 /*
4185 * proc debug entries are shared between contexts, so
4186 * this will fail if the process tries to open the driver
4187 * again with a different context. The printing code
4188 * prints all contexts that a given PID has anyway, so this
4189 * is not a problem.
4190 */
16b66554 4191 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
14db3181
MC
4192 binder_debugfs_dir_entry_proc,
4193 (void *)(unsigned long)proc->pid,
4194 &binder_proc_fops);
355b0502
GKH
4195 }
4196
4197 return 0;
4198}
4199
4200static int binder_flush(struct file *filp, fl_owner_t id)
4201{
4202 struct binder_proc *proc = filp->private_data;
4203
4204 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
4205
4206 return 0;
4207}
4208
4209static void binder_deferred_flush(struct binder_proc *proc)
4210{
4211 struct rb_node *n;
4212 int wake_count = 0;
10f62861 4213
355b0502
GKH
4214 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
4215 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
10f62861 4216
08dabcee 4217 thread->looper_need_return = true;
355b0502
GKH
4218 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
4219 wake_up_interruptible(&thread->wait);
4220 wake_count++;
4221 }
4222 }
4223 wake_up_interruptible_all(&proc->wait);
4224
4225 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4226 "binder_flush: %d woke %d threads\n", proc->pid,
4227 wake_count);
4228}
4229
4230static int binder_release(struct inode *nodp, struct file *filp)
4231{
4232 struct binder_proc *proc = filp->private_data;
10f62861 4233
16b66554 4234 debugfs_remove(proc->debugfs_entry);
355b0502
GKH
4235 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
4236
4237 return 0;
4238}
4239
008fa749
ME
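/*
 * Called during process teardown for each node the process owns:
 * either free the node outright when nothing references it, or move it
 * to binder_dead_nodes and queue BINDER_WORK_DEAD_BINDER for every ref
 * that requested a death notification. Returns the updated incoming
 * ref count.
 */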
4240static int binder_node_release(struct binder_node *node, int refs)
4241{
4242 struct binder_ref *ref;
4243 int death = 0;
ed29721e 4244 struct binder_proc *proc = node->proc;
008fa749 4245
72196393 4246 binder_release_work(proc, &node->async_todo);
ed29721e
TK
4247
4248 binder_inner_proc_lock(proc);
72196393 4249 binder_dequeue_work_ilocked(&node->work);
adc18842
TK
4250 /*
4251 * The caller must have taken a temporary ref on the node.
4252 */
4253 BUG_ON(!node->tmp_refs);
4254 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
ed29721e
TK
4255 binder_inner_proc_unlock(proc);
4256 binder_free_node(node);
008fa749
ME
4257
4258 return refs;
4259 }
4260
4261 node->proc = NULL;
4262 node->local_strong_refs = 0;
4263 node->local_weak_refs = 0;
ed29721e 4264 binder_inner_proc_unlock(proc);
c44b1231
TK
4265
4266 spin_lock(&binder_dead_nodes_lock);
008fa749 4267 hlist_add_head(&node->dead_node, &binder_dead_nodes);
c44b1231 4268 spin_unlock(&binder_dead_nodes_lock);
008fa749
ME
4269
4270 hlist_for_each_entry(ref, &node->refs, node_entry) {
4271 refs++;
4272
4273 if (!ref->death)
e194fd8a 4274 continue;
008fa749
ME
4275
4276 death++;
4277
72196393 4278 binder_inner_proc_lock(ref->proc);
008fa749
ME
4279 if (list_empty(&ref->death->work.entry)) {
4280 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
72196393
TK
4281 binder_enqueue_work_ilocked(&ref->death->work,
4282 &ref->proc->todo);
008fa749
ME
4283 wake_up_interruptible(&ref->proc->wait);
4284 } else
4285 BUG();
72196393 4286 binder_inner_proc_unlock(ref->proc);
008fa749
ME
4287 }
4288
008fa749
ME
4289 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4290 "node %d now dead, refs %d, death %d\n",
4291 node->debug_id, refs, death);
adc18842 4292 binder_put_node(node);
008fa749
ME
4293
4294 return refs;
4295}
4296
355b0502
GKH
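/*
 * Final teardown of a proc (runs from the deferred workqueue): drop
 * the context manager node if this proc owned it, release all threads,
 * nodes and refs, then drain the remaining todo and delivered_death
 * lists before dropping the proc's last temporary reference.
 */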
4297static void binder_deferred_release(struct binder_proc *proc)
4298{
342e5c90 4299 struct binder_context *context = proc->context;
355b0502 4300 struct rb_node *n;
19c98724 4301 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
355b0502 4302
355b0502
GKH
4303 BUG_ON(proc->files);
4304
c44b1231 4305 mutex_lock(&binder_procs_lock);
355b0502 4306 hlist_del(&proc->proc_node);
c44b1231 4307 mutex_unlock(&binder_procs_lock);
53413e7d 4308
c44b1231 4309 mutex_lock(&context->context_mgr_node_lock);
342e5c90
MC
4310 if (context->binder_context_mgr_node &&
4311 context->binder_context_mgr_node->proc == proc) {
355b0502 4312 binder_debug(BINDER_DEBUG_DEAD_BINDER,
c07c933f
ME
4313 "%s: %d context_mgr_node gone\n",
4314 __func__, proc->pid);
342e5c90 4315 context->binder_context_mgr_node = NULL;
355b0502 4316 }
c44b1231 4317 mutex_unlock(&context->context_mgr_node_lock);
7a4408c6
TK
4318 /*
4319 * Make sure proc stays alive after we
4320 * remove all the threads
4321 */
4322 proc->tmp_ref++;
355b0502 4323
7a4408c6 4324 proc->is_dead = true;
355b0502
GKH
4325 threads = 0;
4326 active_transactions = 0;
4327 while ((n = rb_first(&proc->threads))) {
53413e7d
ME
4328 struct binder_thread *thread;
4329
4330 thread = rb_entry(n, struct binder_thread, rb_node);
355b0502 4331 threads++;
7a4408c6 4332 active_transactions += binder_thread_release(proc, thread);
355b0502 4333 }
53413e7d 4334
355b0502
GKH
4335 nodes = 0;
4336 incoming_refs = 0;
4337 while ((n = rb_first(&proc->nodes))) {
53413e7d 4338 struct binder_node *node;
355b0502 4339
53413e7d 4340 node = rb_entry(n, struct binder_node, rb_node);
355b0502 4341 nodes++;
adc18842
TK
4342 /*
4343 * take a temporary ref on the node before
4344 * calling binder_node_release() which will either
4345 * kfree() the node or call binder_put_node()
4346 */
4347 binder_inc_node_tmpref(node);
355b0502 4348 rb_erase(&node->rb_node, &proc->nodes);
008fa749 4349 incoming_refs = binder_node_release(node, incoming_refs);
355b0502 4350 }
53413e7d 4351
355b0502
GKH
4352 outgoing_refs = 0;
4353 while ((n = rb_first(&proc->refs_by_desc))) {
53413e7d
ME
4354 struct binder_ref *ref;
4355
4356 ref = rb_entry(n, struct binder_ref, rb_node_desc);
355b0502 4357 outgoing_refs++;
372e3147
TK
4358 binder_cleanup_ref(ref);
4359 binder_free_ref(ref);
355b0502 4360 }
53413e7d 4361
72196393
TK
4362 binder_release_work(proc, &proc->todo);
4363 binder_release_work(proc, &proc->delivered_death);
355b0502 4364
355b0502 4365 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
19c98724 4366 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
c07c933f 4367 __func__, proc->pid, threads, nodes, incoming_refs,
19c98724 4368 outgoing_refs, active_transactions);
355b0502 4369
7a4408c6 4370 binder_proc_dec_tmpref(proc);
355b0502
GKH
4371}
4372
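/*
 * Workqueue handler for deferred proc work: pops one proc at a time
 * off binder_deferred_list and performs its pending PUT_FILES, FLUSH
 * and/or RELEASE work.
 */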
4373static void binder_deferred_func(struct work_struct *work)
4374{
4375 struct binder_proc *proc;
4376 struct files_struct *files;
4377
4378 int defer;
10f62861 4379
355b0502 4380 do {
975a1ac9 4381 binder_lock(__func__);
355b0502
GKH
4382 mutex_lock(&binder_deferred_lock);
4383 if (!hlist_empty(&binder_deferred_list)) {
4384 proc = hlist_entry(binder_deferred_list.first,
4385 struct binder_proc, deferred_work_node);
4386 hlist_del_init(&proc->deferred_work_node);
4387 defer = proc->deferred_work;
4388 proc->deferred_work = 0;
4389 } else {
4390 proc = NULL;
4391 defer = 0;
4392 }
4393 mutex_unlock(&binder_deferred_lock);
4394
4395 files = NULL;
4396 if (defer & BINDER_DEFERRED_PUT_FILES) {
4397 files = proc->files;
4398 if (files)
4399 proc->files = NULL;
4400 }
4401
4402 if (defer & BINDER_DEFERRED_FLUSH)
4403 binder_deferred_flush(proc);
4404
4405 if (defer & BINDER_DEFERRED_RELEASE)
4406 binder_deferred_release(proc); /* frees proc */
4407
975a1ac9 4408 binder_unlock(__func__);
355b0502
GKH
4409 if (files)
4410 put_files_struct(files);
4411 } while (proc);
4412}
4413static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
4414
4415static void
4416binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
4417{
4418 mutex_lock(&binder_deferred_lock);
4419 proc->deferred_work |= defer;
4420 if (hlist_unhashed(&proc->deferred_work_node)) {
4421 hlist_add_head(&proc->deferred_work_node,
4422 &binder_deferred_list);
1beba52d 4423 schedule_work(&binder_deferred_work);
355b0502
GKH
4424 }
4425 mutex_unlock(&binder_deferred_lock);
4426}
4427
5249f488
AH
4428static void print_binder_transaction(struct seq_file *m, const char *prefix,
4429 struct binder_transaction *t)
4430{
7a4408c6 4431 spin_lock(&t->lock);
5249f488
AH
4432 seq_printf(m,
4433 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
4434 prefix, t->debug_id, t,
4435 t->from ? t->from->proc->pid : 0,
4436 t->from ? t->from->pid : 0,
4437 t->to_proc ? t->to_proc->pid : 0,
4438 t->to_thread ? t->to_thread->pid : 0,
4439 t->code, t->flags, t->priority, t->need_reply);
7a4408c6
TK
4440 spin_unlock(&t->lock);
4441
355b0502 4442 if (t->buffer == NULL) {
5249f488
AH
4443 seq_puts(m, " buffer free\n");
4444 return;
355b0502 4445 }
5249f488
AH
4446 if (t->buffer->target_node)
4447 seq_printf(m, " node %d",
4448 t->buffer->target_node->debug_id);
4449 seq_printf(m, " size %zd:%zd data %p\n",
4450 t->buffer->data_size, t->buffer->offsets_size,
4451 t->buffer->data);
355b0502
GKH
4452}
4453
72196393
TK
4454static void print_binder_work_ilocked(struct seq_file *m, const char *prefix,
4455 const char *transaction_prefix,
4456 struct binder_work *w)
355b0502
GKH
4457{
4458 struct binder_node *node;
4459 struct binder_transaction *t;
4460
4461 switch (w->type) {
4462 case BINDER_WORK_TRANSACTION:
4463 t = container_of(w, struct binder_transaction, work);
5249f488 4464 print_binder_transaction(m, transaction_prefix, t);
355b0502 4465 break;
26549d17
TK
4466 case BINDER_WORK_RETURN_ERROR: {
4467 struct binder_error *e = container_of(
4468 w, struct binder_error, work);
4469
4470 seq_printf(m, "%stransaction error: %u\n",
4471 prefix, e->cmd);
4472 } break;
355b0502 4473 case BINDER_WORK_TRANSACTION_COMPLETE:
5249f488 4474 seq_printf(m, "%stransaction complete\n", prefix);
355b0502
GKH
4475 break;
4476 case BINDER_WORK_NODE:
4477 node = container_of(w, struct binder_node, work);
da49889d
AH
4478 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
4479 prefix, node->debug_id,
4480 (u64)node->ptr, (u64)node->cookie);
355b0502
GKH
4481 break;
4482 case BINDER_WORK_DEAD_BINDER:
5249f488 4483 seq_printf(m, "%shas dead binder\n", prefix);
355b0502
GKH
4484 break;
4485 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5249f488 4486 seq_printf(m, "%shas cleared dead binder\n", prefix);
355b0502
GKH
4487 break;
4488 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5249f488 4489 seq_printf(m, "%shas cleared death notification\n", prefix);
355b0502
GKH
4490 break;
4491 default:
5249f488 4492 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
355b0502
GKH
4493 break;
4494 }
355b0502
GKH
4495}
4496
72196393
TK
4497static void print_binder_thread_ilocked(struct seq_file *m,
4498 struct binder_thread *thread,
4499 int print_always)
355b0502
GKH
4500{
4501 struct binder_transaction *t;
4502 struct binder_work *w;
5249f488
AH
4503 size_t start_pos = m->count;
4504 size_t header_pos;
355b0502 4505
72196393 4506 WARN_ON(!spin_is_locked(&thread->proc->inner_lock));
7a4408c6 4507 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
08dabcee 4508 thread->pid, thread->looper,
7a4408c6
TK
4509 thread->looper_need_return,
4510 atomic_read(&thread->tmp_ref));
5249f488 4511 header_pos = m->count;
355b0502
GKH
4512 t = thread->transaction_stack;
4513 while (t) {
355b0502 4514 if (t->from == thread) {
5249f488
AH
4515 print_binder_transaction(m,
4516 " outgoing transaction", t);
355b0502
GKH
4517 t = t->from_parent;
4518 } else if (t->to_thread == thread) {
5249f488
AH
4519 print_binder_transaction(m,
4520 " incoming transaction", t);
355b0502
GKH
4521 t = t->to_parent;
4522 } else {
5249f488 4523 print_binder_transaction(m, " bad transaction", t);
355b0502
GKH
4524 t = NULL;
4525 }
4526 }
4527 list_for_each_entry(w, &thread->todo, entry) {
72196393
TK
4528 print_binder_work_ilocked(m, " ",
4529 " pending transaction", w);
355b0502 4530 }
5249f488
AH
4531 if (!print_always && m->count == header_pos)
4532 m->count = start_pos;
355b0502
GKH
4533}
4534
5249f488 4535static void print_binder_node(struct seq_file *m, struct binder_node *node)
355b0502
GKH
4536{
4537 struct binder_ref *ref;
355b0502
GKH
4538 struct binder_work *w;
4539 int count;
4540
4541 count = 0;
b67bfe0d 4542 hlist_for_each_entry(ref, &node->refs, node_entry)
355b0502
GKH
4543 count++;
4544
adc18842 4545 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
da49889d 4546 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5249f488
AH
4547 node->has_strong_ref, node->has_weak_ref,
4548 node->local_strong_refs, node->local_weak_refs,
adc18842 4549 node->internal_strong_refs, count, node->tmp_refs);
355b0502 4550 if (count) {
5249f488 4551 seq_puts(m, " proc");
b67bfe0d 4552 hlist_for_each_entry(ref, &node->refs, node_entry)
5249f488 4553 seq_printf(m, " %d", ref->proc->pid);
355b0502 4554 }
5249f488 4555 seq_puts(m, "\n");
72196393
TK
4556 if (node->proc) {
4557 binder_inner_proc_lock(node->proc);
4558 list_for_each_entry(w, &node->async_todo, entry)
4559 print_binder_work_ilocked(m, " ",
4560 " pending async transaction", w);
4561 binder_inner_proc_unlock(node->proc);
4562 }
355b0502
GKH
4563}
4564
5249f488 4565static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
355b0502 4566{
372e3147
TK
4567 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
4568 ref->data.debug_id, ref->data.desc,
4569 ref->node->proc ? "" : "dead ",
4570 ref->node->debug_id, ref->data.strong,
4571 ref->data.weak, ref->death);
355b0502
GKH
4572}
4573
5249f488
AH
4574static void print_binder_proc(struct seq_file *m,
4575 struct binder_proc *proc, int print_all)
355b0502
GKH
4576{
4577 struct binder_work *w;
4578 struct rb_node *n;
5249f488
AH
4579 size_t start_pos = m->count;
4580 size_t header_pos;
4581
4582 seq_printf(m, "proc %d\n", proc->pid);
14db3181 4583 seq_printf(m, "context %s\n", proc->context->name);
5249f488
AH
4584 header_pos = m->count;
4585
72196393 4586 binder_inner_proc_lock(proc);
5249f488 4587 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
72196393 4588 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5249f488 4589 rb_node), print_all);
72196393 4590 binder_inner_proc_unlock(proc);
5249f488 4591 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
355b0502
GKH
4592 struct binder_node *node = rb_entry(n, struct binder_node,
4593 rb_node);
4594 if (print_all || node->has_async_transaction)
5249f488 4595 print_binder_node(m, node);
355b0502
GKH
4596 }
4597 if (print_all) {
4598 for (n = rb_first(&proc->refs_by_desc);
5249f488 4599 n != NULL;
355b0502 4600 n = rb_next(n))
5249f488
AH
4601 print_binder_ref(m, rb_entry(n, struct binder_ref,
4602 rb_node_desc));
355b0502 4603 }
19c98724 4604 binder_alloc_print_allocated(m, &proc->alloc);
72196393 4605 binder_inner_proc_lock(proc);
5249f488 4606 list_for_each_entry(w, &proc->todo, entry)
72196393 4607 print_binder_work_ilocked(m, " ", " pending transaction", w);
355b0502 4608 list_for_each_entry(w, &proc->delivered_death, entry) {
5249f488 4609 seq_puts(m, " has delivered dead binder\n");
355b0502
GKH
4610 break;
4611 }
72196393 4612 binder_inner_proc_unlock(proc);
5249f488
AH
4613 if (!print_all && m->count == header_pos)
4614 m->count = start_pos;
355b0502
GKH
4615}
4616
167bccbd 4617static const char * const binder_return_strings[] = {
355b0502
GKH
4618 "BR_ERROR",
4619 "BR_OK",
4620 "BR_TRANSACTION",
4621 "BR_REPLY",
4622 "BR_ACQUIRE_RESULT",
4623 "BR_DEAD_REPLY",
4624 "BR_TRANSACTION_COMPLETE",
4625 "BR_INCREFS",
4626 "BR_ACQUIRE",
4627 "BR_RELEASE",
4628 "BR_DECREFS",
4629 "BR_ATTEMPT_ACQUIRE",
4630 "BR_NOOP",
4631 "BR_SPAWN_LOOPER",
4632 "BR_FINISHED",
4633 "BR_DEAD_BINDER",
4634 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4635 "BR_FAILED_REPLY"
4636};
4637
167bccbd 4638static const char * const binder_command_strings[] = {
355b0502
GKH
4639 "BC_TRANSACTION",
4640 "BC_REPLY",
4641 "BC_ACQUIRE_RESULT",
4642 "BC_FREE_BUFFER",
4643 "BC_INCREFS",
4644 "BC_ACQUIRE",
4645 "BC_RELEASE",
4646 "BC_DECREFS",
4647 "BC_INCREFS_DONE",
4648 "BC_ACQUIRE_DONE",
4649 "BC_ATTEMPT_ACQUIRE",
4650 "BC_REGISTER_LOOPER",
4651 "BC_ENTER_LOOPER",
4652 "BC_EXIT_LOOPER",
4653 "BC_REQUEST_DEATH_NOTIFICATION",
4654 "BC_CLEAR_DEATH_NOTIFICATION",
7980240b
MC
4655 "BC_DEAD_BINDER_DONE",
4656 "BC_TRANSACTION_SG",
4657 "BC_REPLY_SG",
355b0502
GKH
4658};
4659
167bccbd 4660static const char * const binder_objstat_strings[] = {
355b0502
GKH
4661 "proc",
4662 "thread",
4663 "node",
4664 "ref",
4665 "death",
4666 "transaction",
4667 "transaction_complete"
4668};
4669
5249f488
AH
4670static void print_binder_stats(struct seq_file *m, const char *prefix,
4671 struct binder_stats *stats)
355b0502
GKH
4672{
4673 int i;
4674
4675 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5249f488 4676 ARRAY_SIZE(binder_command_strings));
355b0502 4677 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
0953c797
BJS
4678 int temp = atomic_read(&stats->bc[i]);
4679
4680 if (temp)
5249f488 4681 seq_printf(m, "%s%s: %d\n", prefix,
0953c797 4682 binder_command_strings[i], temp);
355b0502
GKH
4683 }
4684
4685 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5249f488 4686 ARRAY_SIZE(binder_return_strings));
355b0502 4687 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
0953c797
BJS
4688 int temp = atomic_read(&stats->br[i]);
4689
4690 if (temp)
5249f488 4691 seq_printf(m, "%s%s: %d\n", prefix,
0953c797 4692 binder_return_strings[i], temp);
355b0502
GKH
4693 }
4694
4695 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5249f488 4696 ARRAY_SIZE(binder_objstat_strings));
355b0502 4697 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5249f488 4698 ARRAY_SIZE(stats->obj_deleted));
355b0502 4699 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
0953c797
BJS
4700 int created = atomic_read(&stats->obj_created[i]);
4701 int deleted = atomic_read(&stats->obj_deleted[i]);
4702
4703 if (created || deleted)
4704 seq_printf(m, "%s%s: active %d total %d\n",
4705 prefix,
5249f488 4706 binder_objstat_strings[i],
0953c797
BJS
4707 created - deleted,
4708 created);
355b0502 4709 }
355b0502
GKH
4710}
4711
5249f488
AH
4712static void print_binder_proc_stats(struct seq_file *m,
4713 struct binder_proc *proc)
355b0502
GKH
4714{
4715 struct binder_work *w;
4716 struct rb_node *n;
4717 int count, strong, weak;
4718
5249f488 4719 seq_printf(m, "proc %d\n", proc->pid);
14db3181 4720 seq_printf(m, "context %s\n", proc->context->name);
355b0502
GKH
4721 count = 0;
4722 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
4723 count++;
5249f488
AH
4724 seq_printf(m, " threads: %d\n", count);
4725 seq_printf(m, " requested threads: %d+%d/%d\n"
355b0502
GKH
4726 " ready threads %d\n"
4727 " free async space %zd\n", proc->requested_threads,
4728 proc->requested_threads_started, proc->max_threads,
19c98724
TK
4729 proc->ready_threads,
4730 binder_alloc_get_free_async_space(&proc->alloc));
355b0502
GKH
4731 count = 0;
4732 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
4733 count++;
5249f488 4734 seq_printf(m, " nodes: %d\n", count);
355b0502
GKH
4735 count = 0;
4736 strong = 0;
4737 weak = 0;
4738 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
4739 struct binder_ref *ref = rb_entry(n, struct binder_ref,
4740 rb_node_desc);
4741 count++;
372e3147
TK
4742 strong += ref->data.strong;
4743 weak += ref->data.weak;
355b0502 4744 }
5249f488 4745 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
355b0502 4746
19c98724 4747 count = binder_alloc_get_allocated_count(&proc->alloc);
5249f488 4748 seq_printf(m, " buffers: %d\n", count);
355b0502
GKH
4749
4750 count = 0;
72196393 4751 binder_inner_proc_lock(proc);
355b0502 4752 list_for_each_entry(w, &proc->todo, entry) {
72196393 4753 if (w->type == BINDER_WORK_TRANSACTION)
355b0502 4754 count++;
355b0502 4755 }
72196393 4756 binder_inner_proc_unlock(proc);
5249f488 4757 seq_printf(m, " pending transactions: %d\n", count);
355b0502 4758
5249f488 4759 print_binder_stats(m, " ", &proc->stats);
355b0502
GKH
4760}
4761
4762
5249f488 4763static int binder_state_show(struct seq_file *m, void *unused)
355b0502
GKH
4764{
4765 struct binder_proc *proc;
355b0502 4766 struct binder_node *node;
355b0502 4767
1cf29cf4 4768 binder_lock(__func__);
355b0502 4769
5249f488 4770 seq_puts(m, "binder state:\n");
355b0502 4771
c44b1231 4772 spin_lock(&binder_dead_nodes_lock);
355b0502 4773 if (!hlist_empty(&binder_dead_nodes))
5249f488 4774 seq_puts(m, "dead nodes:\n");
b67bfe0d 4775 hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
5249f488 4776 print_binder_node(m, node);
c44b1231 4777 spin_unlock(&binder_dead_nodes_lock);
355b0502 4778
c44b1231 4779 mutex_lock(&binder_procs_lock);
b67bfe0d 4780 hlist_for_each_entry(proc, &binder_procs, proc_node)
5249f488 4781 print_binder_proc(m, proc, 1);
c44b1231 4782 mutex_unlock(&binder_procs_lock);
1cf29cf4 4783 binder_unlock(__func__);
5249f488 4784 return 0;
355b0502
GKH
4785}
4786
5249f488 4787static int binder_stats_show(struct seq_file *m, void *unused)
355b0502
GKH
4788{
4789 struct binder_proc *proc;
355b0502 4790
1cf29cf4 4791 binder_lock(__func__);
355b0502 4792
5249f488 4793 seq_puts(m, "binder stats:\n");
355b0502 4794
5249f488 4795 print_binder_stats(m, "", &binder_stats);
355b0502 4796
c44b1231 4797 mutex_lock(&binder_procs_lock);
b67bfe0d 4798 hlist_for_each_entry(proc, &binder_procs, proc_node)
5249f488 4799 print_binder_proc_stats(m, proc);
c44b1231 4800 mutex_unlock(&binder_procs_lock);
1cf29cf4 4801 binder_unlock(__func__);
5249f488 4802 return 0;
355b0502
GKH
4803}
4804
5249f488 4805static int binder_transactions_show(struct seq_file *m, void *unused)
355b0502
GKH
4806{
4807 struct binder_proc *proc;
355b0502 4808
1cf29cf4 4809 binder_lock(__func__);
355b0502 4810
5249f488 4811 seq_puts(m, "binder transactions:\n");
c44b1231 4812 mutex_lock(&binder_procs_lock);
b67bfe0d 4813 hlist_for_each_entry(proc, &binder_procs, proc_node)
5249f488 4814 print_binder_proc(m, proc, 0);
c44b1231 4815 mutex_unlock(&binder_procs_lock);
1cf29cf4 4816 binder_unlock(__func__);
5249f488 4817 return 0;
355b0502
GKH
4818}
4819
5249f488 4820static int binder_proc_show(struct seq_file *m, void *unused)
355b0502 4821{
83050a4e 4822 struct binder_proc *itr;
14db3181 4823 int pid = (unsigned long)m->private;
355b0502 4824
1cf29cf4 4825 binder_lock(__func__);
83050a4e 4826
c44b1231 4827 mutex_lock(&binder_procs_lock);
83050a4e 4828 hlist_for_each_entry(itr, &binder_procs, proc_node) {
14db3181
MC
4829 if (itr->pid == pid) {
4830 seq_puts(m, "binder proc state:\n");
4831 print_binder_proc(m, itr, 1);
83050a4e
RA
4832 }
4833 }
c44b1231
TK
4834 mutex_unlock(&binder_procs_lock);
4835
1cf29cf4 4836 binder_unlock(__func__);
5249f488 4837 return 0;
355b0502
GKH
4838}
4839
5249f488 4840static void print_binder_transaction_log_entry(struct seq_file *m,
355b0502
GKH
4841 struct binder_transaction_log_entry *e)
4842{
d99c7333
TK
4843 int debug_id = READ_ONCE(e->debug_id_done);
4844 /*
4845 * read barrier to guarantee debug_id_done read before
4846 * we print the log values
4847 */
4848 smp_rmb();
5249f488 4849 seq_printf(m,
d99c7333 4850 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5249f488
AH
4851 e->debug_id, (e->call_type == 2) ? "reply" :
4852 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
14db3181 4853 e->from_thread, e->to_proc, e->to_thread, e->context_name,
57ada2fb
TK
4854 e->to_node, e->target_handle, e->data_size, e->offsets_size,
4855 e->return_error, e->return_error_param,
4856 e->return_error_line);
d99c7333
TK
4857 /*
4858 * read-barrier to guarantee read of debug_id_done after
4859 * done printing the fields of the entry
4860 */
4861 smp_rmb();
4862 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
4863 "\n" : " (incomplete)\n");
355b0502
GKH
4864}
4865
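/*
 * Dump the transaction log ring buffer in order: 'cur' is the index of
 * the oldest entry (0 until the buffer first wraps) and 'count' the
 * number of valid entries.
 */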
5249f488 4866static int binder_transaction_log_show(struct seq_file *m, void *unused)
355b0502 4867{
5249f488 4868 struct binder_transaction_log *log = m->private;
d99c7333
TK
4869 unsigned int log_cur = atomic_read(&log->cur);
4870 unsigned int count;
4871 unsigned int cur;
355b0502 4872 int i;
355b0502 4873
d99c7333
TK
4874 count = log_cur + 1;
4875 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
4876 0 : count % ARRAY_SIZE(log->entry);
4877 if (count > ARRAY_SIZE(log->entry) || log->full)
4878 count = ARRAY_SIZE(log->entry);
4879 for (i = 0; i < count; i++) {
4880 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
4881
4882 print_binder_transaction_log_entry(m, &log->entry[index]);
355b0502 4883 }
5249f488 4884 return 0;
355b0502
GKH
4885}
4886
4887static const struct file_operations binder_fops = {
4888 .owner = THIS_MODULE,
4889 .poll = binder_poll,
4890 .unlocked_ioctl = binder_ioctl,
da49889d 4891 .compat_ioctl = binder_ioctl,
355b0502
GKH
4892 .mmap = binder_mmap,
4893 .open = binder_open,
4894 .flush = binder_flush,
4895 .release = binder_release,
4896};
4897
5249f488
AH
4898BINDER_DEBUG_ENTRY(state);
4899BINDER_DEBUG_ENTRY(stats);
4900BINDER_DEBUG_ENTRY(transactions);
4901BINDER_DEBUG_ENTRY(transaction_log);
4902
ac4812c5
MC
4903static int __init init_binder_device(const char *name)
4904{
4905 int ret;
4906 struct binder_device *binder_device;
4907
4908 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
4909 if (!binder_device)
4910 return -ENOMEM;
4911
4912 binder_device->miscdev.fops = &binder_fops;
4913 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
4914 binder_device->miscdev.name = name;
4915
4916 binder_device->context.binder_context_mgr_uid = INVALID_UID;
4917 binder_device->context.name = name;
c44b1231 4918 mutex_init(&binder_device->context.context_mgr_node_lock);
ac4812c5
MC
4919
4920 ret = misc_register(&binder_device->miscdev);
4921 if (ret < 0) {
4922 kfree(binder_device);
4923 return ret;
4924 }
4925
4926 hlist_add_head(&binder_device->hlist, &binder_devices);
4927
4928 return ret;
4929}
4930
355b0502
GKH
4931static int __init binder_init(void)
4932{
4933 int ret;
ac4812c5
MC
4934 char *device_name, *device_names;
4935 struct binder_device *device;
4936 struct hlist_node *tmp;
355b0502 4937
d99c7333
TK
4938 atomic_set(&binder_transaction_log.cur, ~0U);
4939 atomic_set(&binder_transaction_log_failed.cur, ~0U);
4940
16b66554
AH
4941 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
4942 if (binder_debugfs_dir_entry_root)
4943 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
4944 binder_debugfs_dir_entry_root);
ac4812c5 4945
16b66554
AH
4946 if (binder_debugfs_dir_entry_root) {
4947 debugfs_create_file("state",
4948 S_IRUGO,
4949 binder_debugfs_dir_entry_root,
4950 NULL,
4951 &binder_state_fops);
4952 debugfs_create_file("stats",
4953 S_IRUGO,
4954 binder_debugfs_dir_entry_root,
4955 NULL,
4956 &binder_stats_fops);
4957 debugfs_create_file("transactions",
4958 S_IRUGO,
4959 binder_debugfs_dir_entry_root,
4960 NULL,
4961 &binder_transactions_fops);
4962 debugfs_create_file("transaction_log",
4963 S_IRUGO,
4964 binder_debugfs_dir_entry_root,
4965 &binder_transaction_log,
4966 &binder_transaction_log_fops);
4967 debugfs_create_file("failed_transaction_log",
4968 S_IRUGO,
4969 binder_debugfs_dir_entry_root,
4970 &binder_transaction_log_failed,
4971 &binder_transaction_log_fops);
355b0502 4972 }
ac4812c5
MC
4973
4974 /*
4975 * Copy the module_parameter string, because we don't want to
4976 * tokenize it in-place.
4977 */
4978 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
4979 if (!device_names) {
4980 ret = -ENOMEM;
4981 goto err_alloc_device_names_failed;
4982 }
4983 strcpy(device_names, binder_devices_param);
4984
4985 while ((device_name = strsep(&device_names, ","))) {
4986 ret = init_binder_device(device_name);
4987 if (ret)
4988 goto err_init_binder_device_failed;
4989 }
4990
4991 return ret;
4992
4993err_init_binder_device_failed:
4994 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
4995 misc_deregister(&device->miscdev);
4996 hlist_del(&device->hlist);
4997 kfree(device);
4998 }
4999err_alloc_device_names_failed:
5000 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
5001
355b0502
GKH
5002 return ret;
5003}
5004
5005device_initcall(binder_init);
5006
975a1ac9
AH
5007#define CREATE_TRACE_POINTS
5008#include "binder_trace.h"
5009
355b0502 5010MODULE_LICENSE("GPL v2");