UPSTREAM: android: binder: fix type mismatch warning
[GitHub/LineageOS/android_kernel_samsung_universal7580.git] drivers/android/binder.c

/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
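/*
 * Editorial sketch (not part of the driver): a hypothetical
 * foo_nilocked()-style helper would be reached with the locks taken
 * in the documented 1) outer, 2) node, 3) inner order, e.g.:
 *
 *	binder_node_lock(node);			// 2) node->lock
 *	binder_inner_proc_lock(node->proc);	// 3) proc->inner_lock
 *	foo_nilocked(node);			// hypothetical helper
 *	binder_inner_proc_unlock(node->proc);
 *	binder_node_unlock(node);
 *
 * proc->outer_lock, when needed, is taken before both.
 */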

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#include <linux/sched/rt.h>
#define MAX_NICE 19
#define MIN_NICE -20
#define NICE_WIDTH (MAX_NICE - MIN_NICE + 1)
#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
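/*
 * Editorial note: with the mainline value MAX_RT_PRIO == 100, these
 * macros map nice 0 to kernel priority 120 (100 + 0 + 20) and nice -20
 * to 100, so SCHED_NORMAL tasks span kernel priorities [100..139]
 * while [0..99] remains reserved for the RT classes.
 */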

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;
static struct workqueue_struct *binder_deferred_workqueue;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
	BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask;

module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};
struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = 1;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by the memset() below.
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
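/*
 * Editorial note: the smp_wmb() above pairs with an smp_rmb() on the
 * log-reader side; a sketch of the pairing read (assuming a reader
 * such as the debugfs log printer later in this file):
 *
 *	int debug_id = READ_ONCE(e->debug_id_done);
 *	smp_rmb();
 *	// the entry is complete only if debug_id is non-zero and
 *	// e->debug_id_done is unchanged when re-read afterwards
 */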

struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;

	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};

/**
 * struct binder_work - work enqueued on a worklist
 * @entry: node enqueued on list
 * @type: type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id: unique ID for debugging
 * (invariant after initialized)
 * @lock: lock for node fields
 * @work: worklist element for node work
 * (protected by @proc->inner_lock)
 * @rb_node: element for proc->nodes tree
 * (protected by @proc->inner_lock)
 * @dead_node: element for binder_dead_nodes list
 * (protected by binder_dead_nodes_lock)
 * @proc: binder_proc that owns this node
 * (invariant after initialized)
 * @refs: list of references on this node
 * (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 * initiating a transaction
 * (protected by @proc->inner_lock if @proc
 * and by @lock)
 * @local_weak_refs: weak user refs from local process
 * (protected by @proc->inner_lock if @proc
 * and by @lock)
 * @local_strong_refs: strong user refs from local process
 * (protected by @proc->inner_lock if @proc
 * and by @lock)
 * @tmp_refs: temporary kernel refs
 * (protected by @proc->inner_lock while @proc
 * is valid, and by binder_dead_nodes_lock
 * if @proc is NULL. During inc/dec and node release
 * it is also protected by @lock to provide safety
 * as the node dies and @proc becomes NULL)
 * @ptr: userspace pointer for node
 * (invariant, no lock needed)
 * @cookie: userspace cookie for node
 * (invariant, no lock needed)
 * @has_strong_ref: userspace notified of strong ref
 * (protected by @proc->inner_lock if @proc
 * and by @lock)
 * @pending_strong_ref: userspace has acked notification of strong ref
 * (protected by @proc->inner_lock if @proc
 * and by @lock)
 * @has_weak_ref: userspace notified of weak ref
 * (protected by @proc->inner_lock if @proc
 * and by @lock)
 * @pending_weak_ref: userspace has acked notification of weak ref
 * (protected by @proc->inner_lock if @proc
 * and by @lock)
 * @has_async_transaction: async transaction to node in progress
 * (protected by @lock)
 * @sched_policy: minimum scheduling policy for node
 * (invariant after initialized)
 * @accept_fds: file descriptor operations supported for node
 * (invariant after initialized)
 * @min_priority: minimum scheduling priority
 * (invariant after initialized)
 * @inherit_rt: inherit RT scheduling policy from caller
 * (invariant after initialized)
 * @async_todo: list of async work items
 * (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 sched_policy:2;
		u8 inherit_rt:1;
		u8 accept_fds:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 * (protected by inner_lock of the proc that
	 * this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id: unique ID for the ref
 * @desc: unique userspace handle for ref
 * @strong: strong ref count (debugging only if not locked)
 * @weak: weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data: binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry: list entry for node->refs list in target node
 * (protected by @node->lock)
 * @proc: binder_proc containing ref
 * @node: binder_node of target node. When cleaning up a
 * ref for deletion in binder_cleanup_ref, a non-NULL
 * @node indicates the node must be freed
 * @death: pointer to death notification (ref_death) if requested
 * (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES = 0x01,
	BINDER_DEFERRED_FLUSH = 0x02,
	BINDER_DEFERRED_RELEASE = 0x04,
};

/**
 * struct binder_priority - scheduler policy and priority
 * @sched_policy: scheduler policy
 * @prio: [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
 *
 * The binder driver supports inheriting the following scheduler policies:
 * SCHED_NORMAL
 * SCHED_BATCH
 * SCHED_FIFO
 * SCHED_RR
 */
struct binder_priority {
	unsigned int sched_policy;
	int prio;
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node: element for binder_procs list
 * @threads: rbtree of binder_threads in this proc
 * (protected by @inner_lock)
 * @nodes: rbtree of binder nodes associated with
 * this proc ordered by node->ptr
 * (protected by @inner_lock)
 * @refs_by_desc: rbtree of refs ordered by ref->desc
 * (protected by @outer_lock)
 * @refs_by_node: rbtree of refs ordered by ref->node
 * (protected by @outer_lock)
 * @waiting_threads: threads currently waiting for proc work
 * (protected by @inner_lock)
 * @pid: PID of group_leader of process
 * (invariant after initialized)
 * @tsk: task_struct for group_leader of process
 * (invariant after initialized)
 * @files: files_struct for process
 * (invariant after initialized)
 * @deferred_work_node: element for binder_deferred_list
 * (protected by binder_deferred_lock)
 * @deferred_work: bitmap of deferred work to perform
 * (protected by binder_deferred_lock)
 * @is_dead: process is dead and awaiting free
 * when outstanding transactions are cleaned up
 * (protected by @inner_lock)
 * @todo: list of work for this process
 * (protected by @inner_lock)
 * @wait: wait queue head to wait for proc work
 * (invariant after initialized)
 * @stats: per-process binder statistics
 * (atomics, no lock needed)
 * @delivered_death: list of delivered death notifications
 * (protected by @inner_lock)
 * @max_threads: cap on number of binder threads
 * (protected by @inner_lock)
 * @requested_threads: number of binder threads requested but not
 * yet started. In current implementation, can
 * only be 0 or 1.
 * (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 * (protected by @inner_lock)
 * @tmp_ref: temporary reference to indicate proc is in use
 * (protected by @inner_lock)
 * @default_priority: default scheduler priority
 * (invariant after initialized)
 * @debugfs_entry: debugfs node
 * @alloc: binder allocator bookkeeping
 * @context: binder_context for this proc
 * (invariant after initialized)
 * @inner_lock: can nest under outer_lock and/or node lock
 * @outer_lock: no nesting under inner or node lock
 * Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	struct binder_priority default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED = 0x01,
	BINDER_LOOPER_STATE_ENTERED = 0x02,
	BINDER_LOOPER_STATE_EXITED = 0x04,
	BINDER_LOOPER_STATE_INVALID = 0x08,
	BINDER_LOOPER_STATE_WAITING = 0x10,
	BINDER_LOOPER_STATE_POLL = 0x20,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc: binder process for this thread
 * (invariant after initialization)
 * @rb_node: element for proc->threads rbtree
 * (protected by @proc->inner_lock)
 * @waiting_thread_node: element for @proc->waiting_threads list
 * (protected by @proc->inner_lock)
 * @pid: PID for this thread
 * (invariant after initialization)
 * @looper: bitmap of looping state
 * (only accessed by this thread)
 * @looper_need_return: looping thread needs to exit driver
 * (no lock needed)
 * @transaction_stack: stack of in-progress transactions for this thread
 * (protected by @proc->inner_lock)
 * @todo: list of work to do for this thread
 * (protected by @proc->inner_lock)
 * @process_todo: whether work in @todo should be processed
 * (protected by @proc->inner_lock)
 * @return_error: transaction errors reported by this thread
 * (only accessed by this thread)
 * @reply_error: transaction errors reported by target thread
 * (protected by @proc->inner_lock)
 * @wait: wait queue for thread work
 * @stats: per-thread statistics
 * (atomics, no lock needed)
 * @tmp_ref: temporary reference to indicate thread is in use
 * (atomic since @proc->inner_lock cannot
 * always be acquired)
 * @is_dead: thread is dead and awaiting free
 * when outstanding transactions are cleaned up
 * (protected by @proc->inner_lock)
 * @task: struct task_struct for this thread
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
	struct task_struct *task;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	struct binder_priority priority;
	struct binder_priority saved_priority;
	bool set_priority_called;
	kuid_t sender_euid;
	/**
	 * @lock: protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc: struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc: struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node: struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node: struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is set, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node: struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc: binder_proc associated with list
 * @list: list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work: struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread: thread to queue work to
 * @work: struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
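/*
 * Editorial note: transaction-bound work normally goes through
 * binder_enqueue_thread_work() so the thread's process_todo flag is
 * set and a waiting reader returns with the item; the deferred
 * variant above deliberately leaves process_todo clear, so the item
 * sits on thread->todo until some later work kicks off queue
 * processing.
 */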

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc: binder_proc associated with list
 * @work: struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc: binder_proc associated with list
 * @list: list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	struct files_struct *files = proc->files;
	unsigned long rlim_cur;
	unsigned long irqs;

	if (files == NULL)
		return -ESRCH;

	if (!lock_task_sighand(proc->tsk, &irqs))
		return -EMFILE;

	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	return __alloc_fd(files, 0, rlim_cur, flags);
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	if (proc->files)
		__fd_install(proc->files, fd, file);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	if (proc->files == NULL)
		return -ESRCH;

	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc: process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return: If there's a thread currently waiting for process work,
 * returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc: process to wake up a thread in
 * @thread: specific thread to wake-up (may be NULL)
 * @sync: whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static bool is_rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static bool is_fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static bool binder_supported_policy(int policy)
{
	return is_fair_policy(policy) || is_rt_policy(policy);
}

static int to_userspace_prio(int policy, int kernel_priority)
{
	if (is_fair_policy(policy))
		return PRIO_TO_NICE(kernel_priority);
	else
		return MAX_USER_RT_PRIO - 1 - kernel_priority;
}

static int to_kernel_prio(int policy, int user_priority)
{
	if (is_fair_policy(policy))
		return NICE_TO_PRIO(user_priority);
	else
		return MAX_USER_RT_PRIO - 1 - user_priority;
}
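/*
 * Editorial worked example (assuming the mainline constants
 * MAX_RT_PRIO == 100 and MAX_USER_RT_PRIO == 100): for the fair
 * class, to_kernel_prio(SCHED_NORMAL, 10) == NICE_TO_PRIO(10) == 130
 * and to_userspace_prio(SCHED_NORMAL, 130) == 10. For the RT class,
 * user priority 99 maps to kernel priority 100 - 1 - 99 == 0 (the
 * highest), and the same formula is its own inverse on the way back.
 */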

static void binder_do_set_priority(struct task_struct *task,
				   struct binder_priority desired,
				   bool verify)
{
	int priority; /* user-space prio value */
	bool has_cap_nice;
	unsigned int policy = desired.sched_policy;

	if (task->policy == policy && task->normal_prio == desired.prio)
		return;

	has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);

	priority = to_userspace_prio(policy, desired.prio);

	if (verify && is_rt_policy(policy) && !has_cap_nice) {
		long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);

		if (max_rtprio == 0) {
			policy = SCHED_NORMAL;
			priority = MIN_NICE;
		} else if (priority > max_rtprio) {
			priority = max_rtprio;
		}
	}

	if (verify && is_fair_policy(policy) && !has_cap_nice) {
		long min_nice = (MAX_NICE - task_rlimit(task, RLIMIT_NICE) + 1);

		if (min_nice > MAX_NICE) {
			binder_user_error("%d RLIMIT_NICE not set\n",
					  task->pid);
			return;
		} else if (priority < min_nice) {
			priority = min_nice;
		}
	}

	if (policy != desired.sched_policy ||
	    to_kernel_prio(policy, priority) != desired.prio)
		binder_debug(BINDER_DEBUG_PRIORITY_CAP,
			     "%d: priority %d not allowed, using %d instead\n",
			     task->pid, desired.prio,
			     to_kernel_prio(policy, priority));

	trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
				  to_kernel_prio(policy, priority),
				  desired.prio);

	/* Set the actual priority */
	if (task->policy != policy || is_rt_policy(policy)) {
		struct sched_param params;

		params.sched_priority = is_rt_policy(policy) ? priority : 0;

		sched_setscheduler_nocheck(task,
					   policy | SCHED_RESET_ON_FORK,
					   &params);
	}
	if (is_fair_policy(policy))
		set_user_nice(task, priority);
}
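/*
 * Editorial worked example for the RLIMIT_NICE clamp above: the
 * kernel encodes the nice rlimit as 20 - nice, so a task with
 * RLIMIT_NICE == 30 yields min_nice = 19 - 30 + 1 = -10, i.e. an
 * unprivileged caller may be boosted to nice -10 but no further.
 */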

static void binder_set_priority(struct task_struct *task,
				struct binder_priority desired)
{
	binder_do_set_priority(task, desired, /* verify = */ true);
}

static void binder_restore_priority(struct task_struct *task,
				    struct binder_priority desired)
{
	binder_do_set_priority(task, desired, /* verify = */ false);
}

static void binder_transaction_priority(struct task_struct *task,
					struct binder_transaction *t,
					struct binder_priority node_prio,
					bool inherit_rt)
{
	struct binder_priority desired_prio = t->priority;

	if (t->set_priority_called)
		return;

	t->set_priority_called = true;
	t->saved_priority.sched_policy = task->policy;
	t->saved_priority.prio = task->normal_prio;

	if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
		desired_prio.prio = NICE_TO_PRIO(0);
		desired_prio.sched_policy = SCHED_NORMAL;
	}

	if (node_prio.prio < t->priority.prio ||
	    (node_prio.prio == t->priority.prio &&
	     node_prio.sched_policy == SCHED_FIFO)) {
		/*
		 * In case the minimum priority on the node is
		 * higher (lower value), use that priority. If
		 * the priority is the same, but the node uses
		 * SCHED_FIFO, prefer SCHED_FIFO, since it can
		 * run unbounded, unlike SCHED_RR.
		 */
		desired_prio = node_prio;
	}

	binder_set_priority(task, desired_prio);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;
	s8 priority;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
		FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
	node->min_priority = to_kernel_prio(node->sched_policy, priority);
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->
				      binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			binder_dequeue_work_ilocked(&node->work);
			/*
			 * Note: this function is the only place where we queue
			 * directly to a thread->todo without using the
			 * corresponding binder_enqueue_thread_work() helper
			 * functions; in this case it's ok to not set the
			 * process_todo flag, since we know this node work will
			 * always be followed by other work that starts queue
			 * processing: in case of synchronous transactions, a
			 * BR_REPLY or BR_ERROR; in case of oneway
			 * transactions, a BR_TRANSACTION_COMPLETE.
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node: node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node: node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc: binder_proc that owns the ref
 * @node: binder_node of target
 * @new_ref: newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return: the ref for node. It is possible that another thread
 * allocated/initialized the ref first in which case the
 * returned ref would be different than the passed-in
 * new_ref. new_ref must be kfree'd by the caller in
 * this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
1720
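/**
 * binder_cleanup_ref_olocked() - delete a ref from its proc and node
 * @ref:	ref to clean up
 *
 * Erase @ref from both rb-trees of its proc, drop the node counts it
 * holds and dequeue any pending death-notification work.
 * @ref->proc->outer_lock must be held on entry.
 */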
5717118f 1721static void binder_cleanup_ref_olocked(struct binder_ref *ref)
355b0502 1722{
ccca76b6 1723 bool delete_node = false;
ccca76b6 1724
355b0502 1725 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
56b468fc 1726 "%d delete ref %d desc %d for node %d\n",
bc65c39a 1727 ref->proc->pid, ref->data.debug_id, ref->data.desc,
56b468fc 1728 ref->node->debug_id);
355b0502
GKH
1729
1730 rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
1731 rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
bc65c39a 1732
25de59a5 1733 binder_node_inner_lock(ref->node);
bc65c39a 1734 if (ref->data.strong)
25de59a5 1735 binder_dec_node_nilocked(ref->node, 1, 1);
bc65c39a 1736
355b0502 1737 hlist_del(&ref->node_entry);
25de59a5
TK
1738 delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
1739 binder_node_inner_unlock(ref->node);
ccca76b6
TK
1740 /*
1741 * Clear ref->node unless we want the caller to free the node
1742 */
1743 if (!delete_node) {
1744 /*
1745 * The caller uses ref->node to determine
1746 * whether the node needs to be freed. Clear
1747 * it since the node is still alive.
1748 */
1749 ref->node = NULL;
1750 }
bc65c39a 1751
355b0502
GKH
1752 if (ref->death) {
1753 binder_debug(BINDER_DEBUG_DEAD_BINDER,
56b468fc 1754 "%d delete ref %d desc %d has death notification\n",
bc65c39a
TK
1755 ref->proc->pid, ref->data.debug_id,
1756 ref->data.desc);
aad8732c 1757 binder_dequeue_work(ref->proc, &ref->death->work);
355b0502
GKH
1758 binder_stats_deleted(BINDER_STAT_DEATH);
1759 }
355b0502
GKH
1760 binder_stats_deleted(BINDER_STAT_REF);
1761}
1762
bc65c39a 1763/**
5717118f 1764 * binder_inc_ref_olocked() - increment the ref for given handle
bc65c39a
TK
1765 * @ref: ref to be incremented
1766 * @strong: if true, strong increment, else weak
1767 * @target_list: list to queue node work on
1768 *
5717118f 1769 * Increment the ref. @ref->proc->outer_lock must be held on entry
bc65c39a
TK
1770 *
1771 * Return: 0, if successful, else errno
1772 */
5717118f
TK
1773static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
1774 struct list_head *target_list)
355b0502
GKH
1775{
1776 int ret;
01c8cbde 1777
355b0502 1778 if (strong) {
bc65c39a 1779 if (ref->data.strong == 0) {
355b0502
GKH
1780 ret = binder_inc_node(ref->node, 1, 1, target_list);
1781 if (ret)
1782 return ret;
1783 }
bc65c39a 1784 ref->data.strong++;
355b0502 1785 } else {
bc65c39a 1786 if (ref->data.weak == 0) {
355b0502
GKH
1787 ret = binder_inc_node(ref->node, 0, 1, target_list);
1788 if (ret)
1789 return ret;
1790 }
bc65c39a 1791 ref->data.weak++;
355b0502
GKH
1792 }
1793 return 0;
1794}
1795
bc65c39a
TK
1796/**
1797 * binder_dec_ref() - dec the ref for given handle
1798 * @ref: ref to be decremented
1799 * @strong: if true, strong decrement, else weak
1800 *
1801 * Decrement the ref.
1802 *
bc65c39a
TK
1803 * Return: true if ref is cleaned up and ready to be freed
1804 */
5717118f 1805static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
355b0502
GKH
1806{
1807 if (strong) {
bc65c39a 1808 if (ref->data.strong == 0) {
56b468fc 1809 binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
bc65c39a
TK
1810 ref->proc->pid, ref->data.debug_id,
1811 ref->data.desc, ref->data.strong,
1812 ref->data.weak);
1813 return false;
355b0502 1814 }
bc65c39a 1815 ref->data.strong--;
ccca76b6
TK
1816 if (ref->data.strong == 0)
1817 binder_dec_node(ref->node, strong, 1);
355b0502 1818 } else {
bc65c39a 1819 if (ref->data.weak == 0) {
56b468fc 1820 binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
bc65c39a
TK
1821 ref->proc->pid, ref->data.debug_id,
1822 ref->data.desc, ref->data.strong,
1823 ref->data.weak);
1824 return false;
355b0502 1825 }
bc65c39a 1826 ref->data.weak--;
355b0502 1827 }
bc65c39a 1828 if (ref->data.strong == 0 && ref->data.weak == 0) {
5717118f 1829 binder_cleanup_ref_olocked(ref);
bc65c39a
TK
1830 return true;
1831 }
1832 return false;
1833}
1834
1835/**
1836 * binder_get_node_from_ref() - get the node from the given proc/desc
1837 * @proc: proc containing the ref
1838 * @desc: the handle associated with the ref
1839 * @need_strong_ref: if true, only return node if ref is strong
1840 * @rdata: the id/refcount data for the ref
1841 *
1842 * Given a proc and ref handle, return the associated binder_node
1843 *
1844 * Return: a binder_node or NULL if not found or not strong when strong required
1845 */
1846static struct binder_node *binder_get_node_from_ref(
1847 struct binder_proc *proc,
1848 u32 desc, bool need_strong_ref,
1849 struct binder_ref_data *rdata)
1850{
1851 struct binder_node *node;
1852 struct binder_ref *ref;
1853
5717118f
TK
1854 binder_proc_lock(proc);
1855 ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
bc65c39a
TK
1856 if (!ref)
1857 goto err_no_ref;
1858 node = ref->node;
9607bf70
TK
1859 /*
1860 * Take an implicit reference on the node to ensure
1861 * it stays alive until the call to binder_put_node()
1862 */
1863 binder_inc_node_tmpref(node);
bc65c39a
TK
1864 if (rdata)
1865 *rdata = ref->data;
5717118f 1866 binder_proc_unlock(proc);
bc65c39a
TK
1867
1868 return node;
1869
1870err_no_ref:
5717118f 1871 binder_proc_unlock(proc);
bc65c39a
TK
1872 return NULL;
1873}
1874
1875/**
1876 * binder_free_ref() - free the binder_ref
1877 * @ref: ref to free
1878 *
ccca76b6
TK
1879 * Free the binder_ref. Free the binder_node indicated by ref->node
1880 * (if non-NULL) and the binder_ref_death indicated by ref->death.
bc65c39a
TK
1881 */
1882static void binder_free_ref(struct binder_ref *ref)
1883{
ccca76b6
TK
1884 if (ref->node)
1885 binder_free_node(ref->node);
bc65c39a
TK
1886 kfree(ref->death);
1887 kfree(ref);
1888}
1889
1890/**
1891 * binder_update_ref_for_handle() - inc/dec the ref for given handle
1892 * @proc: proc containing the ref
1893 * @desc: the handle associated with the ref
1894 * @increment: true=inc reference, false=dec reference
1895 * @strong: true=strong reference, false=weak reference
1896 * @rdata: the id/refcount data for the ref
1897 *
1898 * Given a proc and ref handle, increment or decrement the ref
1899 * according to "increment" arg.
1900 *
1901 * Return: 0 if successful, else errno
1902 */
1903static int binder_update_ref_for_handle(struct binder_proc *proc,
1904 uint32_t desc, bool increment, bool strong,
1905 struct binder_ref_data *rdata)
1906{
1907 int ret = 0;
1908 struct binder_ref *ref;
1909 bool delete_ref = false;
1910
5717118f
TK
1911 binder_proc_lock(proc);
1912 ref = binder_get_ref_olocked(proc, desc, strong);
bc65c39a
TK
1913 if (!ref) {
1914 ret = -EINVAL;
1915 goto err_no_ref;
1916 }
1917 if (increment)
5717118f 1918 ret = binder_inc_ref_olocked(ref, strong, NULL);
bc65c39a 1919 else
5717118f 1920 delete_ref = binder_dec_ref_olocked(ref, strong);
bc65c39a
TK
1921
1922 if (rdata)
1923 *rdata = ref->data;
5717118f 1924 binder_proc_unlock(proc);
bc65c39a
TK
1925
1926 if (delete_ref)
1927 binder_free_ref(ref);
1928 return ret;
1929
1930err_no_ref:
5717118f 1931 binder_proc_unlock(proc);
bc65c39a
TK
1932 return ret;
1933}
1934
1935/**
1936 * binder_dec_ref_for_handle() - dec the ref for given handle
1937 * @proc: proc containing the ref
1938 * @desc: the handle associated with the ref
1939 * @strong: true=strong reference, false=weak reference
1940 * @rdata: the id/refcount data for the ref
1941 *
1942 * Just calls binder_update_ref_for_handle() to decrement the ref.
1943 *
1944 * Return: 0 if successful, else errno
1945 */
1946static int binder_dec_ref_for_handle(struct binder_proc *proc,
1947 uint32_t desc, bool strong, struct binder_ref_data *rdata)
1948{
1949 return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
1950}
1951
1952
1953/**
1954 * binder_inc_ref_for_node() - increment the ref for given proc/node
1955 * @proc: proc containing the ref
1956 * @node: target node
1957 * @strong: true=strong reference, false=weak reference
1958 * @target_list: worklist to use if node is incremented
1959 * @rdata: the id/refcount data for the ref
1960 *
1961 * Given a proc and node, increment the ref. Create the ref if it
1962 * doesn't already exist
1963 *
1964 * Return: 0 if successful, else errno
1965 */
1966static int binder_inc_ref_for_node(struct binder_proc *proc,
1967 struct binder_node *node,
1968 bool strong,
1969 struct list_head *target_list,
1970 struct binder_ref_data *rdata)
1971{
1972 struct binder_ref *ref;
1973 struct binder_ref *new_ref = NULL;
1974 int ret = 0;
1975
5717118f
TK
1976 binder_proc_lock(proc);
1977 ref = binder_get_ref_for_node_olocked(proc, node, NULL);
bc65c39a 1978 if (!ref) {
5717118f 1979 binder_proc_unlock(proc);
bc65c39a
TK
1980 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1981 if (!new_ref)
1982 return -ENOMEM;
5717118f
TK
1983 binder_proc_lock(proc);
1984 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
bc65c39a 1985 }
5717118f 1986 ret = binder_inc_ref_olocked(ref, strong, target_list);
bc65c39a 1987 *rdata = ref->data;
5717118f 1988 binder_proc_unlock(proc);
bc65c39a
TK
1989 if (new_ref && ref != new_ref)
1990 /*
1991 * Another thread created the ref first so
1992 * free the one we allocated
1993 */
1994 kfree(new_ref);
1995 return ret;
355b0502
GKH
1996}
1997
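/**
 * binder_pop_transaction_ilocked() - pop transaction from thread's stack
 * @target_thread:	thread owning the transaction stack
 * @t:			transaction expected at the top of the stack
 *
 * Unlink @t from @target_thread's transaction stack and clear t->from.
 * @target_thread->proc->inner_lock must be held on entry.
 */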
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from)
		return NULL;
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
	return NULL;
}

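/**
 * binder_free_transaction() - free a transaction struct
 * @t:	transaction to free
 *
 * Detach @t from its buffer, if any, before freeing it and updating
 * the transaction statistics.
 */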
static void binder_free_transaction(struct binder_transaction *t)
{
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

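/**
 * binder_send_failed_reply() - deliver an error instead of a reply
 * @t:		failed transaction
 * @error_code:	BR_* command to deliver to the waiting sender
 *
 * Walk the transaction stack via from_parent until a live sender
 * thread is found and queue @error_code on it; transactions whose
 * sender died along the way are freed.
 */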
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				WARN(1, "Unexpected reply error: %u\n",
				     target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}

/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			"undelivered transaction %d, %s\n",
			t->debug_id, reason);
		binder_free_transaction(t);
	}
}

/**
 * binder_validate_object() - checks for a valid metadata object in a buffer.
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the buffer at which to validate an object.
 *
 * Return: If there's a valid metadata object at @offset in @buffer, the
 *	   size of that object. Otherwise, it returns zero.
 */
static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
{
	/* Check if we can read a header first */
	struct binder_object_header *hdr;
	size_t object_size = 0;

	if (offset > buffer->data_size - sizeof(*hdr) ||
	    buffer->data_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	/* Ok, now see if we can read a complete object. */
	hdr = (struct binder_object_header *)(buffer->data + offset);
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}

/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b:		binder_buffer containing the object
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start:	points to the start of the offset array
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return: If @index is within the valid range of the offset array
 *	   described by @start and @num_valid, and if there's a valid
 *	   binder_buffer_object at the offset found in index @index
 *	   of the offset array, that object is returned. Otherwise,
 *	   %NULL is returned.
 *	   Note that the offset found in index @index itself is not
 *	   verified; this function assumes that @num_valid elements
 *	   from @start were previously verified to have valid offsets.
 */
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
							binder_size_t index,
							binder_size_t *start,
							binder_size_t num_valid)
{
	struct binder_buffer_object *buffer_obj;
	binder_size_t *offp;

	if (index >= num_valid)
		return NULL;

	offp = start + index;
	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
		return NULL;

	return buffer_obj;
}

/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b:			transaction buffer
 * @objects_start:	start of objects buffer
 * @buffer:		binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @buffer to fix up
 * @last_obj:		last binder_buffer_object that we fixed up in
 * @last_min_offset:	minimum fixup offset in @last_obj
 *
 * Return: %true if a fixup in buffer @buffer at offset @fixup_offset is
 *	   allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_buffer *b,
				  binder_size_t *objects_start,
				  struct binder_buffer_object *buffer,
				  binder_size_t fixup_offset,
				  struct binder_buffer_object *last_obj,
				  binder_size_t last_min_offset)
{
	if (!last_obj) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj != buffer) {
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
		last_obj = (struct binder_buffer_object *)
			(b->data + *(objects_start + last_obj->parent));
	}
	return (fixup_offset >= last_min_offset);
}

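/**
 * binder_transaction_buffer_release() - release the objects in a buffer
 * @proc:	binder_proc that owns the buffer
 * @buffer:	transaction buffer being released
 * @failed_at:	pointer into the offsets array where processing failed,
 *		or NULL to release every object in the buffer
 *
 * Undo the node, ref and fd bookkeeping that was done when the buffer
 * was filled in by binder_transaction().
 */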
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_start, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start = (binder_size_t *)(buffer->data +
				      ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)off_start + buffer->offsets_size;
	for (offp = off_start; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(buffer, *offp);

		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		hdr = (struct binder_object_header *)(buffer->data + *offp);
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);

			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->fd);
			if (failed_at)
				task_close_fd(proc, fp->fd);
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			uintptr_t parent_buffer;
			u32 *fd_array;
			size_t fd_index;
			binder_size_t fd_buf_size;

			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(buffer, fda->parent,
						     off_start,
						     offp - off_start);
			if (!parent) {
				pr_err("transaction release %d bad parent offset",
				       debug_id);
				continue;
			}
			/*
			 * Since the parent was already fixed up, convert it
			 * back to kernel address space to access it
			 */
			parent_buffer = parent->buffer -
				binder_alloc_get_user_buffer_offset(
						&proc->alloc);

			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
			for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
				task_close_fd(proc, fd_array[fd_index]);
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}

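/**
 * binder_translate_binder() - convert a local binder object to a handle
 * @fp:		flat_binder_object to translate, updated in place
 * @t:		transaction the object belongs to
 * @thread:	sending thread
 *
 * Replace a BINDER_TYPE_(WEAK_)BINDER object with the corresponding
 * (weak) handle in the target process, creating the node and ref as
 * needed.
 *
 * Return: 0 on success, or a negative errno on failure
 */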
static int binder_translate_binder(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_node *node;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_ref_data rdata;
	int ret = 0;

	node = binder_get_node(proc, fp->binder);
	if (!node) {
		node = binder_new_node(proc, fp);
		if (!node)
			return -ENOMEM;
	}
	if (fp->cookie != node->cookie) {
		binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
				  proc->pid, thread->pid, (u64)fp->binder,
				  node->debug_id, (u64)fp->cookie,
				  (u64)node->cookie);
		ret = -EINVAL;
		goto done;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	ret = binder_inc_ref_for_node(target_proc, node,
			fp->hdr.type == BINDER_TYPE_BINDER,
			&thread->todo, &rdata);
	if (ret)
		goto done;

	if (fp->hdr.type == BINDER_TYPE_BINDER)
		fp->hdr.type = BINDER_TYPE_HANDLE;
	else
		fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
	fp->binder = 0;
	fp->handle = rdata.desc;
	fp->cookie = 0;

	trace_binder_transaction_node_to_ref(t, node, &rdata);
	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "        node %d u%016llx -> ref %d desc %d\n",
		     node->debug_id, (u64)node->ptr,
		     rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}

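/**
 * binder_translate_handle() - translate a handle for the target process
 * @fp:		flat_binder_object containing the handle, updated in place
 * @t:		transaction the object belongs to
 * @thread:	sending thread
 *
 * If the handle refers to a node owned by the target process, convert
 * it back into a (weak) binder object; otherwise take a ref in the
 * target process and rewrite the handle to the target's descriptor.
 *
 * Return: 0 on success, or a negative errno on failure
 */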
static int binder_translate_handle(struct flat_binder_object *fp,
				   struct binder_transaction *t,
				   struct binder_thread *thread)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_node *node;
	struct binder_ref_data src_rdata;
	int ret = 0;

	node = binder_get_node_from_ref(proc, fp->handle,
			fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
	if (!node) {
		binder_user_error("%d:%d got transaction with invalid handle, %d\n",
				  proc->pid, thread->pid, fp->handle);
		return -EINVAL;
	}
	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
		ret = -EPERM;
		goto done;
	}

	binder_node_lock(node);
	if (node->proc == target_proc) {
		if (fp->hdr.type == BINDER_TYPE_HANDLE)
			fp->hdr.type = BINDER_TYPE_BINDER;
		else
			fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
		fp->binder = node->ptr;
		fp->cookie = node->cookie;
		if (node->proc)
			binder_inner_proc_lock(node->proc);
		binder_inc_node_nilocked(node,
					 fp->hdr.type == BINDER_TYPE_BINDER,
					 0, NULL);
		if (node->proc)
			binder_inner_proc_unlock(node->proc);
		trace_binder_transaction_ref_to_node(t, node, &src_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> node %d u%016llx\n",
			     src_rdata.debug_id, src_rdata.desc, node->debug_id,
			     (u64)node->ptr);
		binder_node_unlock(node);
	} else {
		struct binder_ref_data dest_rdata;

		binder_node_unlock(node);
		ret = binder_inc_ref_for_node(target_proc, node,
				fp->hdr.type == BINDER_TYPE_HANDLE,
				NULL, &dest_rdata);
		if (ret)
			goto done;

		fp->binder = 0;
		fp->handle = dest_rdata.desc;
		fp->cookie = 0;
		trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
						    &dest_rdata);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
			     src_rdata.debug_id, src_rdata.desc,
			     dest_rdata.debug_id, dest_rdata.desc,
			     node->debug_id);
	}
done:
	binder_put_node(node);
	return ret;
}

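/**
 * binder_translate_fd() - duplicate an fd into the target process
 * @fd:			file descriptor in the sending process
 * @t:			transaction the fd belongs to
 * @thread:		sending thread
 * @in_reply_to:	transaction being replied to, or NULL
 *
 * Install a duplicate of @fd in the target process, provided the
 * target accepts fds and the security policy allows the transfer.
 *
 * Return: the fd number in the target process, or a negative errno
 */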
static int binder_translate_fd(int fd,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_transaction *in_reply_to)
{
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	int target_fd;
	struct file *file;
	int ret;
	bool target_allows_fd;

	if (in_reply_to)
		target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
	else
		target_allows_fd = t->buffer->target_node->accept_fds;
	if (!target_allows_fd) {
		binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
				  proc->pid, thread->pid,
				  in_reply_to ? "reply" : "transaction",
				  fd);
		ret = -EPERM;
		goto err_fd_not_accepted;
	}

	file = fget(fd);
	if (!file) {
		binder_user_error("%d:%d got transaction with invalid fd, %d\n",
				  proc->pid, thread->pid, fd);
		ret = -EBADF;
		goto err_fget;
	}
	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
	if (ret < 0) {
		ret = -EPERM;
		goto err_security;
	}

	target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
	if (target_fd < 0) {
		ret = -ENOMEM;
		goto err_get_unused_fd;
	}
	task_fd_install(target_proc, target_fd, file);
	trace_binder_transaction_fd(t, fd, target_fd);
	binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
		     fd, target_fd);

	return target_fd;

err_get_unused_fd:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}

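/**
 * binder_translate_fd_array() - translate an array of fds in place
 * @fda:		fd array object describing the fds
 * @parent:		already fixed-up buffer object containing the array
 * @t:			transaction the array belongs to
 * @thread:		sending thread
 * @in_reply_to:	transaction being replied to, or NULL
 *
 * Translate each fd via binder_translate_fd(); on failure, close the
 * fds that were already installed in the target process.
 *
 * Return: 0 on success, or a negative errno on failure
 */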
static int binder_translate_fd_array(struct binder_fd_array_object *fda,
				     struct binder_buffer_object *parent,
				     struct binder_transaction *t,
				     struct binder_thread *thread,
				     struct binder_transaction *in_reply_to)
{
	binder_size_t fdi, fd_buf_size, num_installed_fds;
	int target_fd;
	uintptr_t parent_buffer;
	u32 *fd_array;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	fd_buf_size = sizeof(u32) * fda->num_fds;
	if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
		binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	if (fd_buf_size > parent->length ||
	    fda->parent_offset > parent->length - fd_buf_size) {
		/* No space for all file descriptors here. */
		binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
				  proc->pid, thread->pid, (u64)fda->num_fds);
		return -EINVAL;
	}
	/*
	 * Since the parent was already fixed up, convert it
	 * back to the kernel address space to access it
	 */
	parent_buffer = parent->buffer -
		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
	fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
		binder_user_error("%d:%d parent offset not aligned correctly.\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	for (fdi = 0; fdi < fda->num_fds; fdi++) {
		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
						in_reply_to);
		if (target_fd < 0)
			goto err_translate_fd_failed;
		fd_array[fdi] = target_fd;
	}
	return 0;

err_translate_fd_failed:
	/*
	 * Failed to allocate fd or security error, free fds
	 * installed so far.
	 */
	num_installed_fds = fdi;
	for (fdi = 0; fdi < num_installed_fds; fdi++)
		task_close_fd(target_proc, fd_array[fdi]);
	return target_fd;
}

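/**
 * binder_fixup_parent() - write a buffer's new address into its parent
 * @t:			transaction being processed
 * @thread:		sending thread
 * @bp:			buffer object whose address changed
 * @off_start:		start of the offsets array
 * @num_valid:		number of already-validated offsets
 * @last_fixup_obj:	last buffer object that was fixed up
 * @last_fixup_min_off:	minimum fixup offset in @last_fixup_obj
 *
 * If @bp carries BINDER_BUFFER_FLAG_HAS_PARENT, validate the fixup and
 * store @bp->buffer at the requested offset inside the parent buffer.
 *
 * Return: 0 on success, or a negative errno on failure
 */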
static int binder_fixup_parent(struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t *off_start,
			       binder_size_t num_valid,
			       struct binder_buffer_object *last_fixup_obj,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	u8 *parent_buffer;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(b, off_start,
				   parent, bp->parent_offset,
				   last_fixup_obj,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}
	parent_buffer = (u8 *)((uintptr_t)parent->buffer -
			binder_alloc_get_user_buffer_offset(
				&target_proc->alloc));
	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;

	return 0;
}

/**
 * binder_proc_transaction() - sends a transaction to a process and wakes it up
 * @t:		transaction to send
 * @proc:	process to send the transaction to
 * @thread:	thread in @proc to send the transaction to (may be NULL)
 *
 * This function queues a transaction to the specified process. It will try
 * to find a thread in the target process to handle the transaction and
 * wake it up. If no thread is found, the work is queued to the proc
 * waitqueue.
 *
 * If the @thread parameter is not NULL, the transaction is always queued
 * to the waitlist of that specific thread.
 *
 * Return: true if the transaction was successfully queued
 *	   false if the target process or thread is dead
 */
static bool binder_proc_transaction(struct binder_transaction *t,
				    struct binder_proc *proc,
				    struct binder_thread *thread)
{
	struct binder_node *node = t->buffer->target_node;
	struct binder_priority node_prio;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool pending_async = false;

	BUG_ON(!node);
	binder_node_lock(node);
	node_prio.prio = node->min_priority;
	node_prio.sched_policy = node->sched_policy;

	if (oneway) {
		BUG_ON(thread);
		if (node->has_async_transaction) {
			pending_async = true;
		} else {
			node->has_async_transaction = 1;
		}
	}

	binder_inner_proc_lock(proc);

	if (proc->is_dead || (thread && thread->is_dead)) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		return false;
	}

	if (!thread && !pending_async)
		thread = binder_select_thread_ilocked(proc);

	if (thread) {
		binder_transaction_priority(thread->task, t, node_prio,
					    node->inherit_rt);
		binder_enqueue_thread_work_ilocked(thread, &t->work);
	} else if (!pending_async) {
		binder_enqueue_work_ilocked(&t->work, &proc->todo);
	} else {
		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
	}

	if (!pending_async)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);

	return true;
}

/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:	struct binder_node for which to get refs
 * @procp:	returns @node->proc if valid
 * @error:	if no @procp then returns BR_DEAD_REPLY
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
 * target proc has died, @error is set to BR_DEAD_REPLY.
 */
static struct binder_node *binder_get_node_refs_for_txn(
		struct binder_node *node,
		struct binder_proc **procp,
		uint32_t *error)
{
	struct binder_node *target_node = NULL;

	binder_node_inner_lock(node);
	if (node->proc) {
		target_node = node;
		binder_inc_node_nilocked(node, 1, 0, NULL);
		binder_inc_node_tmpref_ilocked(node);
		node->proc->tmp_ref++;
		*procp = node->proc;
	} else
		*error = BR_DEAD_REPLY;
	binder_node_inner_unlock(node);

	return target_node;
}

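/*
 * binder_transaction() - handle a BC_TRANSACTION or BC_REPLY command.
 *
 * Resolves the target process and thread, allocates the target-side
 * buffer, copies the payload and translates every embedded object
 * (binders, handles, fds, fd arrays and scatter-gather buffers), then
 * queues the transaction to the target. On any failure the return_error
 * fields are set and all references taken so far are released.
 */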
static void binder_transaction(struct binder_proc *proc,
			       struct binder_thread *thread,
			       struct binder_transaction_data *tr, int reply,
			       binder_size_t extra_buffers_size)
{
	int ret;
	struct binder_transaction *t;
	struct binder_work *tcomplete;
	binder_size_t *offp, *off_end, *off_start;
	binder_size_t off_min;
	u8 *sg_bufp, *sg_buf_end;
	struct binder_proc *target_proc = NULL;
	struct binder_thread *target_thread = NULL;
	struct binder_node *target_node = NULL;
	struct binder_transaction *in_reply_to = NULL;
	struct binder_transaction_log_entry *e;
	uint32_t return_error = 0;
	uint32_t return_error_param = 0;
	uint32_t return_error_line = 0;
	struct binder_buffer_object *last_fixup_obj = NULL;
	binder_size_t last_fixup_min_off = 0;
	struct binder_context *context = proc->context;
	int t_debug_id = atomic_inc_return(&binder_last_id);

	e = binder_transaction_log_add(&binder_transaction_log);
	e->debug_id = t_debug_id;
	e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
	e->from_proc = proc->pid;
	e->from_thread = thread->pid;
	e->target_handle = tr->target.handle;
	e->data_size = tr->data_size;
	e->offsets_size = tr->offsets_size;
	e->context_name = proc->context->name;

	if (reply) {
		binder_inner_proc_lock(proc);
		in_reply_to = thread->transaction_stack;
		if (in_reply_to == NULL) {
			binder_inner_proc_unlock(proc);
			binder_user_error("%d:%d got reply transaction with no transaction stack\n",
					  proc->pid, thread->pid);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			goto err_empty_call_stack;
		}
		if (in_reply_to->to_thread != thread) {
			spin_lock(&in_reply_to->lock);
			binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
				proc->pid, thread->pid, in_reply_to->debug_id,
				in_reply_to->to_proc ?
				in_reply_to->to_proc->pid : 0,
				in_reply_to->to_thread ?
				in_reply_to->to_thread->pid : 0);
			spin_unlock(&in_reply_to->lock);
			binder_inner_proc_unlock(proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			goto err_bad_call_stack;
		}
		thread->transaction_stack = in_reply_to->to_parent;
		binder_inner_proc_unlock(proc);
		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
		if (target_thread == NULL) {
			return_error = BR_DEAD_REPLY;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		if (target_thread->transaction_stack != in_reply_to) {
			binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
				proc->pid, thread->pid,
				target_thread->transaction_stack ?
				target_thread->transaction_stack->debug_id : 0,
				in_reply_to->debug_id);
			binder_inner_proc_unlock(target_thread->proc);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPROTO;
			return_error_line = __LINE__;
			in_reply_to = NULL;
			target_thread = NULL;
			goto err_dead_binder;
		}
		target_proc = target_thread->proc;
		target_proc->tmp_ref++;
		binder_inner_proc_unlock(target_thread->proc);
	} else {
		if (tr->target.handle) {
			struct binder_ref *ref;

			/*
			 * There must already be a strong ref
			 * on this node. If so, do a strong
			 * increment on the node to ensure it
			 * stays alive until the transaction is
			 * done.
			 */
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, tr->target.handle,
						     true);
			if (ref) {
				target_node = binder_get_node_refs_for_txn(
						ref->node, &target_proc,
						&return_error);
			} else {
				binder_user_error("%d:%d got transaction to invalid handle\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
			}
			binder_proc_unlock(proc);
		} else {
			mutex_lock(&context->context_mgr_node_lock);
			target_node = context->binder_context_mgr_node;
			if (target_node)
				target_node = binder_get_node_refs_for_txn(
						target_node, &target_proc,
						&return_error);
			else
				return_error = BR_DEAD_REPLY;
			mutex_unlock(&context->context_mgr_node_lock);
		}
		if (!target_node) {
			/*
			 * return_error is set above
			 */
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_dead_binder;
		}
		e->to_node = target_node->debug_id;
		if (security_binder_transaction(proc->tsk,
						target_proc->tsk) < 0) {
			return_error = BR_FAILED_REPLY;
			return_error_param = -EPERM;
			return_error_line = __LINE__;
			goto err_invalid_target_handle;
		}
		binder_inner_proc_lock(proc);
		if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
			struct binder_transaction *tmp;

			tmp = thread->transaction_stack;
			if (tmp->to_thread != thread) {
				spin_lock(&tmp->lock);
				binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
					proc->pid, thread->pid, tmp->debug_id,
					tmp->to_proc ? tmp->to_proc->pid : 0,
					tmp->to_thread ?
					tmp->to_thread->pid : 0);
				spin_unlock(&tmp->lock);
				binder_inner_proc_unlock(proc);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EPROTO;
				return_error_line = __LINE__;
				goto err_bad_call_stack;
			}
			while (tmp) {
				struct binder_thread *from;

				spin_lock(&tmp->lock);
				from = tmp->from;
				if (from && from->proc == target_proc) {
					atomic_inc(&from->tmp_ref);
					target_thread = from;
					spin_unlock(&tmp->lock);
					break;
				}
				spin_unlock(&tmp->lock);
				tmp = tmp->from_parent;
			}
		}
		binder_inner_proc_unlock(proc);
	}
	if (target_thread)
		e->to_thread = target_thread->pid;
	e->to_proc = target_proc->pid;

	/* TODO: reuse incoming transaction for reply */
	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_t_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION);
	spin_lock_init(&t->lock);

	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
	if (tcomplete == NULL) {
		return_error = BR_FAILED_REPLY;
		return_error_param = -ENOMEM;
		return_error_line = __LINE__;
		goto err_alloc_tcomplete_failed;
	}
	binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

	t->debug_id = t_debug_id;

	if (reply)
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_thread->pid,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);
	else
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
			     proc->pid, thread->pid, t->debug_id,
			     target_proc->pid, target_node->debug_id,
			     (u64)tr->data.ptr.buffer,
			     (u64)tr->data.ptr.offsets,
			     (u64)tr->data_size, (u64)tr->offsets_size,
			     (u64)extra_buffers_size);

	if (!reply && !(tr->flags & TF_ONE_WAY))
		t->from = thread;
	else
		t->from = NULL;
	t->sender_euid = task_euid(proc->tsk);
	t->to_proc = target_proc;
	t->to_thread = target_thread;
	t->code = tr->code;
	t->flags = tr->flags;
	if (!(t->flags & TF_ONE_WAY) &&
	    binder_supported_policy(current->policy)) {
		/* Inherit supported policies for synchronous transactions */
		t->priority.sched_policy = current->policy;
		t->priority.prio = current->normal_prio;
	} else {
		/* Otherwise, fall back to the default priority */
		t->priority = target_proc->default_priority;
	}

	trace_binder_transaction(reply, t, target_node);

	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
		tr->offsets_size, extra_buffers_size,
		!reply && (t->flags & TF_ONE_WAY));
	if (IS_ERR(t->buffer)) {
		/*
		 * -ESRCH indicates VMA cleared. The target is dying.
		 */
		return_error_param = PTR_ERR(t->buffer);
		return_error = return_error_param == -ESRCH ?
			BR_DEAD_REPLY : BR_FAILED_REPLY;
		return_error_line = __LINE__;
		t->buffer = NULL;
		goto err_binder_alloc_buf_failed;
	}
	t->buffer->allow_user_free = 0;
	t->buffer->debug_id = t->debug_id;
	t->buffer->transaction = t;
	t->buffer->target_node = target_node;
	trace_binder_transaction_alloc_buf(t->buffer);
	off_start = (binder_size_t *)(t->buffer->data +
				      ALIGN(tr->data_size, sizeof(void *)));
	offp = off_start;

	if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
			   tr->data.ptr.buffer, tr->data_size)) {
		binder_user_error("%d:%d got transaction with invalid data ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (copy_from_user(offp, (const void __user *)(uintptr_t)
			   tr->data.ptr.offsets, tr->offsets_size)) {
		binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
				proc->pid, thread->pid);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EFAULT;
		return_error_line = __LINE__;
		goto err_copy_data_failed;
	}
	if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
		binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
				proc->pid, thread->pid, (u64)tr->offsets_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
		binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
				  proc->pid, thread->pid,
				  extra_buffers_size);
		return_error = BR_FAILED_REPLY;
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_bad_offset;
	}
	off_end = (void *)off_start + tr->offsets_size;
	sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
	sg_buf_end = sg_bufp + extra_buffers_size;
	off_min = 0;
	for (; offp < off_end; offp++) {
		struct binder_object_header *hdr;
		size_t object_size = binder_validate_object(t->buffer, *offp);

		if (object_size == 0 || *offp < off_min) {
			binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
					  proc->pid, thread->pid, (u64)*offp,
					  (u64)off_min,
					  (u64)t->buffer->data_size);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_offset;
		}

		hdr = (struct binder_object_header *)(t->buffer->data + *offp);
		off_min = *offp + object_size;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_binder(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;

			fp = to_flat_binder_object(hdr);
			ret = binder_translate_handle(fp, t, thread);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
		} break;

		case BINDER_TYPE_FD: {
			struct binder_fd_object *fp = to_binder_fd_object(hdr);
			int target_fd = binder_translate_fd(fp->fd, t, thread,
							    in_reply_to);

			if (target_fd < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = target_fd;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			fp->pad_binder = 0;
			fp->fd = target_fd;
		} break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda =
				to_binder_fd_array_object(hdr);
			struct binder_buffer_object *parent =
				binder_validate_ptr(t->buffer, fda->parent,
						    off_start,
						    offp - off_start);
			if (!parent) {
				binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			if (!binder_validate_fixup(t->buffer, off_start,
						   parent, fda->parent_offset,
						   last_fixup_obj,
						   last_fixup_min_off)) {
				binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_parent;
			}
			ret = binder_translate_fd_array(fda, parent, t, thread,
							in_reply_to);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = parent;
			last_fixup_min_off =
				fda->parent_offset + sizeof(u32) * fda->num_fds;
		} break;
		case BINDER_TYPE_PTR: {
			struct binder_buffer_object *bp =
				to_binder_buffer_object(hdr);
			size_t buf_left = sg_buf_end - sg_bufp;

			if (bp->length > buf_left) {
				binder_user_error("%d:%d got transaction with too large buffer\n",
						  proc->pid, thread->pid);
				return_error = BR_FAILED_REPLY;
				return_error_param = -EINVAL;
				return_error_line = __LINE__;
				goto err_bad_offset;
			}
			if (copy_from_user(sg_bufp,
					   (const void __user *)(uintptr_t)
					   bp->buffer, bp->length)) {
				binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
						  proc->pid, thread->pid);
				return_error_param = -EFAULT;
				return_error = BR_FAILED_REPLY;
				return_error_line = __LINE__;
				goto err_copy_data_failed;
			}
			/* Fixup buffer pointer to target proc address space */
			bp->buffer = (uintptr_t)sg_bufp +
				binder_alloc_get_user_buffer_offset(
						&target_proc->alloc);
			sg_bufp += ALIGN(bp->length, sizeof(u64));

			ret = binder_fixup_parent(t, thread, bp, off_start,
						  offp - off_start,
						  last_fixup_obj,
						  last_fixup_min_off);
			if (ret < 0) {
				return_error = BR_FAILED_REPLY;
				return_error_param = ret;
				return_error_line = __LINE__;
				goto err_translate_failed;
			}
			last_fixup_obj = bp;
			last_fixup_min_off = 0;
		} break;
		default:
			binder_user_error("%d:%d got transaction with invalid object type, %x\n",
				proc->pid, thread->pid, hdr->type);
			return_error = BR_FAILED_REPLY;
			return_error_param = -EINVAL;
			return_error_line = __LINE__;
			goto err_bad_object_type;
		}
	}
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	t->work.type = BINDER_WORK_TRANSACTION;

	if (reply) {
		binder_enqueue_thread_work(thread, tcomplete);
		binder_inner_proc_lock(target_proc);
		if (target_thread->is_dead) {
			binder_inner_proc_unlock(target_proc);
			goto err_dead_proc_or_thread;
		}
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction_ilocked(target_thread, in_reply_to);
		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
		binder_inner_proc_unlock(target_proc);
		wake_up_interruptible_sync(&target_thread->wait);
		binder_restore_priority(current, in_reply_to->saved_priority);
		binder_free_transaction(in_reply_to);
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_inner_proc_lock(proc);
		/*
3345 * Defer the TRANSACTION_COMPLETE, so we don't return to
3346 * userspace immediately; this allows the target process to
3347 * immediately start processing this transaction, reducing
3348 * latency. We will then return the TRANSACTION_COMPLETE when
3349 * the target replies (or there is an error).
3350 */
3351 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
355b0502
GKH
3352 t->need_reply = 1;
3353 t->from_parent = thread->transaction_stack;
3354 thread->transaction_stack = t;
a45c586f 3355 binder_inner_proc_unlock(proc);
1200efe9 3356 if (!binder_proc_transaction(t, target_proc, target_thread)) {
a45c586f
MC
3357 binder_inner_proc_lock(proc);
3358 binder_pop_transaction_ilocked(thread, t);
3359 binder_inner_proc_unlock(proc);
82f6ad88
TK
3360 goto err_dead_proc_or_thread;
3361 }
355b0502
GKH
3362 } else {
3363 BUG_ON(target_node == NULL);
3364 BUG_ON(t->buffer->async_transaction != 1);
3fb0a5b6 3365 binder_enqueue_thread_work(thread, tcomplete);
1200efe9 3366 if (!binder_proc_transaction(t, target_proc, NULL))
82f6ad88 3367 goto err_dead_proc_or_thread;
d999e705 3368 }
82f6ad88
TK
3369 if (target_thread)
3370 binder_thread_dec_tmpref(target_thread);
3371 binder_proc_dec_tmpref(target_proc);
361f527d
TK
3372 if (target_node)
3373 binder_dec_node_tmpref(target_node);
7d94b2c7
TK
3374 /*
3375 * write barrier to synchronize with initialization
3376 * of log entry
3377 */
3378 smp_wmb();
3379 WRITE_ONCE(e->debug_id_done, t_debug_id);
355b0502
GKH
3380 return;
3381
82f6ad88
TK
3382err_dead_proc_or_thread:
3383 return_error = BR_DEAD_REPLY;
3384 return_error_line = __LINE__;
41fad392 3385 binder_dequeue_work(proc, tcomplete);
b4bdab80 3386err_translate_failed:
355b0502
GKH
3387err_bad_object_type:
3388err_bad_offset:
2f82efbc 3389err_bad_parent:
355b0502 3390err_copy_data_failed:
975a1ac9 3391 trace_binder_transaction_failed_buffer_release(t->buffer);
355b0502 3392 binder_transaction_buffer_release(target_proc, t->buffer, offp);
361f527d
TK
3393 if (target_node)
3394 binder_dec_node_tmpref(target_node);
0b62b556 3395 target_node = NULL;
355b0502 3396 t->buffer->transaction = NULL;
69c33bb1 3397 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
355b0502
GKH
3398err_binder_alloc_buf_failed:
3399 kfree(tcomplete);
3400 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3401err_alloc_tcomplete_failed:
3402 kfree(t);
3403 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3404err_alloc_t_failed:
3405err_bad_call_stack:
3406err_empty_call_stack:
3407err_dead_binder:
3408err_invalid_target_handle:
82f6ad88
TK
3409 if (target_thread)
3410 binder_thread_dec_tmpref(target_thread);
3411 if (target_proc)
3412 binder_proc_dec_tmpref(target_proc);
361f527d 3413 if (target_node) {
0b62b556 3414 binder_dec_node(target_node, 1, 0);
361f527d
TK
3415 binder_dec_node_tmpref(target_node);
3416 }
0b62b556 3417
355b0502 3418 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
dfc99565
TK
3419 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3420 proc->pid, thread->pid, return_error, return_error_param,
3421 (u64)tr->data_size, (u64)tr->offsets_size,
3422 return_error_line);
355b0502
GKH
3423
3424 {
3425 struct binder_transaction_log_entry *fe;
01c8cbde 3426
dfc99565
TK
3427 e->return_error = return_error;
3428 e->return_error_param = return_error_param;
3429 e->return_error_line = return_error_line;
7eac155d 3430 fe = binder_transaction_log_add(&binder_transaction_log_failed);
355b0502 3431 *fe = *e;
7d94b2c7
TK
3432 /*
3433 * write barrier to synchronize with initialization
3434 * of log entry
3435 */
3436 smp_wmb();
3437 WRITE_ONCE(e->debug_id_done, t_debug_id);
3438 WRITE_ONCE(fe->debug_id_done, t_debug_id);
355b0502
GKH
3439 }
3440
795aa6bc 3441 BUG_ON(thread->return_error.cmd != BR_OK);
355b0502 3442 if (in_reply_to) {
dadba0f5 3443 binder_restore_priority(current, in_reply_to->saved_priority);
795aa6bc 3444 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3fb0a5b6 3445 binder_enqueue_thread_work(thread, &thread->return_error.work);
355b0502 3446 binder_send_failed_reply(in_reply_to, return_error);
795aa6bc
TK
3447 } else {
3448 thread->return_error.cmd = return_error;
3fb0a5b6 3449 binder_enqueue_thread_work(thread, &thread->return_error.work);
795aa6bc 3450 }
355b0502
GKH
3451}
3452
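/*
 * binder_thread_write() below consumes the write side of a
 * BINDER_WRITE_READ ioctl: a packed stream of BC_* command words, each
 * followed by its command-specific payload. Illustrative user-space
 * sketch (added commentary, not part of the driver; names are
 * hypothetical):
 *
 *	uint32_t wbuf[] = { BC_ENTER_LOOPER };
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)wbuf,
 *		.write_size = sizeof(wbuf),
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 *
 * *consumed is advanced after each fully parsed command, so the caller
 * can see how much of the buffer was processed even on error.
 */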
int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error.cmd == BR_OK) {
		int ret;

		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			const char *debug_string;
			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
			struct binder_ref_data rdata;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(uint32_t);
			ret = -1;
			if (increment && !target) {
				struct binder_node *ctx_mgr_node;

				mutex_lock(&context->context_mgr_node_lock);
				ctx_mgr_node = context->binder_context_mgr_node;
				if (ctx_mgr_node)
					ret = binder_inc_ref_for_node(
							proc, ctx_mgr_node,
							strong, NULL, &rdata);
				mutex_unlock(&context->context_mgr_node_lock);
			}
			if (ret)
				ret = binder_update_ref_for_handle(
						proc, target, increment, strong,
						&rdata);
			if (!ret && rdata.desc != target) {
				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
						  proc->pid, thread->pid,
						  target, rdata.desc);
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				break;
			case BC_RELEASE:
				debug_string = "Release";
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				break;
			}
			if (ret) {
				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
						  proc->pid, thread->pid, debug_string,
						  strong, target, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d\n",
				     proc->pid, thread->pid, debug_string,
				     rdata.debug_id, rdata.desc, rdata.strong,
				     rdata.weak);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;
			bool free_node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
						  proc->pid, thread->pid,
						  cmd == BC_INCREFS_DONE ?
						  "BC_INCREFS_DONE" :
						  "BC_ACQUIRE_DONE",
						  (u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
						  proc->pid, thread->pid,
						  cmd == BC_INCREFS_DONE ?
						  "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
						  (u64)node_ptr, node->debug_id,
						  (u64)cookie, (u64)node->cookie);
				binder_put_node(node);
				break;
			}
			binder_node_inner_lock(node);
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
							  proc->pid, thread->pid,
							  node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
							  proc->pid, thread->pid,
							  node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_weak_ref = 0;
			}
			free_node = binder_dec_node_nilocked(node,
					cmd == BC_ACQUIRE_DONE, 0);
			WARN_ON(free_node);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d tr %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs,
				     node->local_weak_refs, node->tmp_refs);
			binder_node_inner_unlock(node);
			binder_put_node(node);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_alloc_prepare_to_free(&proc->alloc,
							      data_ptr);
			if (buffer == NULL) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
						  proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			if (!buffer->allow_user_free) {
				binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
						  proc->pid, thread->pid, (u64)data_ptr);
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");

			if (buffer->transaction) {
				buffer->transaction->buffer = NULL;
				buffer->transaction = NULL;
			}
			if (buffer->async_transaction && buffer->target_node) {
				struct binder_node *buf_node;
				struct binder_work *w;

				buf_node = buffer->target_node;
				binder_node_inner_lock(buf_node);
				BUG_ON(!buf_node->has_async_transaction);
				BUG_ON(buf_node->proc != proc);
				w = binder_dequeue_work_head_ilocked(
						&buf_node->async_todo);
				if (!w) {
					buf_node->has_async_transaction = 0;
				} else {
					binder_enqueue_work_ilocked(
							w, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
				binder_node_inner_unlock(buf_node);
			}
			trace_binder_transaction_buffer_release(buffer);
			binder_transaction_buffer_release(proc, buffer, NULL);
			binder_alloc_free_buf(&proc->alloc, buffer);
			break;
		}

		case BC_TRANSACTION_SG:
		case BC_REPLY_SG: {
			struct binder_transaction_data_sg tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr.transaction_data,
					   cmd == BC_REPLY_SG, tr.buffers_size);
			break;
		}
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}

		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			binder_inner_proc_lock(proc);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
						  proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
						  proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			binder_inner_proc_unlock(proc);
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
						  proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death = NULL;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/*
				 * Allocate memory for death notification
				 * before taking lock
				 */
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					WARN_ON(thread->return_error.cmd !=
						BR_OK);
					thread->return_error.cmd = BR_ERROR;
					binder_enqueue_thread_work(
						thread,
						&thread->return_error.work);
					binder_debug(
						BINDER_DEBUG_FAILED_TRANSACTION,
						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						proc->pid, thread->pid);
					break;
				}
			}
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
						  proc->pid, thread->pid,
						  cmd == BC_REQUEST_DEATH_NOTIFICATION ?
						  "BC_REQUEST_DEATH_NOTIFICATION" :
						  "BC_CLEAR_DEATH_NOTIFICATION",
						  target);
				binder_proc_unlock(proc);
				kfree(death);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->data.debug_id,
				     ref->data.desc, ref->data.strong,
				     ref->data.weak, ref->node->debug_id);

			binder_node_lock(ref->node);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
							  proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					kfree(death);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;

					binder_inner_proc_lock(proc);
					binder_enqueue_work_ilocked(
						&ref->death->work, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
					binder_inner_proc_unlock(proc);
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
							  proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
							  proc->pid, thread->pid,
							  (u64)death->cookie,
							  (u64)cookie);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				ref->death = NULL;
				binder_inner_proc_lock(proc);
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_thread_work_ilocked(
								thread,
								&death->work);
					else {
						binder_enqueue_work_ilocked(
								&death->work,
								&proc->todo);
						binder_wakeup_proc_ilocked(
								proc);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
				binder_inner_proc_unlock(proc);
			}
			binder_node_unlock(ref->node);
			binder_proc_unlock(proc);
		} break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			binder_inner_proc_lock(proc);
			list_for_each_entry(w, &proc->delivered_death,
					    entry) {
				struct binder_ref_death *tmp_death =
					container_of(w,
						     struct binder_ref_death,
						     work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
						  proc->pid, thread->pid, (u64)cookie);
				binder_inner_proc_unlock(proc);
				break;
			}
			binder_dequeue_work_ilocked(&death->work);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper &
				    (BINDER_LOOPER_STATE_REGISTERED |
				     BINDER_LOOPER_STATE_ENTERED))
					binder_enqueue_thread_work_ilocked(
							thread, &death->work);
				else {
					binder_enqueue_work_ilocked(
							&death->work,
							&proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
			}
			binder_inner_proc_unlock(proc);
		} break;

		default:
			pr_err("%d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}

static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
	}
}

static int binder_put_node_cmd(struct binder_proc *proc,
			       struct binder_thread *thread,
			       void __user **ptrp,
			       binder_uintptr_t node_ptr,
			       binder_uintptr_t node_cookie,
			       int node_debug_id,
			       uint32_t cmd, const char *cmd_name)
{
	void __user *ptr = *ptrp;

	if (put_user(cmd, (uint32_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(uint32_t);

	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	binder_stat_br(proc, thread, cmd);
	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
		     proc->pid, thread->pid, cmd_name, node_debug_id,
		     (u64)node_ptr, (u64)node_cookie);

	*ptrp = ptr;
	return 0;
}

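/*
 * Commentary on the wait protocol implemented below (added for
 * clarity): a thread willing to handle process-wide work parks itself
 * on proc->waiting_threads so binder_wakeup_proc_ilocked() can pick an
 * idle thread, and the freezer_do_not_count()/freezer_count() pair
 * keeps a binder thread sleeping here from blocking a freeze of its
 * cgroup.
 */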
static int binder_wait_for_work(struct binder_thread *thread,
				bool do_proc_work)
{
	DEFINE_WAIT(wait);
	struct binder_proc *proc = thread->proc;
	int ret = 0;

	freezer_do_not_count();
	binder_inner_proc_lock(proc);
	for (;;) {
		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
		if (binder_has_work_ilocked(thread, do_proc_work))
			break;
		if (do_proc_work)
			list_add(&thread->waiting_thread_node,
				 &proc->waiting_threads);
		binder_inner_proc_unlock(proc);
		schedule();
		binder_inner_proc_lock(proc);
		list_del_init(&thread->waiting_thread_node);
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&thread->wait, &wait);
	binder_inner_proc_unlock(proc);
	freezer_count();

	return ret;
}

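/*
 * binder_thread_read() below fills the read side of BINDER_WRITE_READ
 * with a stream of BR_* words. A BR_NOOP is always placed first in an
 * empty buffer, so user space can unconditionally skip one word.
 * Illustrative consumer loop (hypothetical user-space code):
 *
 *	uint32_t *p = (uint32_t *)read_buf;
 *	uint32_t *end = p + bwr.read_consumed / sizeof(uint32_t);
 *	while (p < end) {
 *		uint32_t cmd = *p++;
 *		if (cmd == BR_TRANSACTION || cmd == BR_REPLY) {
 *			handle_txn((struct binder_transaction_data *)p);
 *			p += sizeof(struct binder_transaction_data) /
 *			     sizeof(uint32_t);
 *		}
 *		... dispatch on the other BR_* payloads ...
 *	}
 */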
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
					  proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_restore_priority(current, proc->default_priority);
	}

	if (non_block) {
		if (!binder_has_work(thread, wait_for_proc_work))
			ret = -EAGAIN;
	} else {
		ret = binder_wait_for_work(thread, wait_for_proc_work);
	}

	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data tr;
		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;

		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			 wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);
		if (binder_worklist_empty_ilocked(&thread->todo))
			thread->process_todo = false;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			WARN_ON(e->cmd == BR_OK);
			binder_inner_proc_unlock(proc);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			cmd = e->cmd;
			e->cmd = BR_OK;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_inner_proc_unlock(proc);
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			int strong, weak;
			binder_uintptr_t node_ptr = node->ptr;
			binder_uintptr_t node_cookie = node->cookie;
			int node_debug_id = node->debug_id;
			int has_weak_ref;
			int has_strong_ref;
			void __user *orig_ptr = ptr;

			BUG_ON(proc != node->proc);
			strong = node->internal_strong_refs ||
					node->local_strong_refs;
			weak = !hlist_empty(&node->refs) ||
					node->local_weak_refs ||
					node->tmp_refs || strong;
			has_strong_ref = node->has_strong_ref;
			has_weak_ref = node->has_weak_ref;

			if (weak && !has_weak_ref) {
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			}
			if (strong && !has_strong_ref) {
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			}
			if (!strong && has_strong_ref)
				node->has_strong_ref = 0;
			if (!weak && has_weak_ref)
				node->has_weak_ref = 0;
			if (!weak && !strong) {
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx deleted\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_inner_proc_unlock(proc);
				binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
				binder_node_unlock(node);
				binder_free_node(node);
			} else
				binder_inner_proc_unlock(proc);

			if (weak && !has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_INCREFS, "BR_INCREFS");
			if (!ret && strong && !has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_ACQUIRE, "BR_ACQUIRE");
			if (!ret && !strong && has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_RELEASE, "BR_RELEASE");
			if (!ret && !weak && has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_DECREFS, "BR_DECREFS");
			if (orig_ptr == ptr)
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
			if (ret)
				return ret;
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			cookie = death->cookie;

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)cookie);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				binder_inner_proc_unlock(proc);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;
			struct binder_priority node_prio;

			tr.target.ptr = target_node->ptr;
			tr.cookie = target_node->cookie;
			node_prio.sched_policy = target_node->sched_policy;
			node_prio.prio = target_node->min_priority;
			binder_transaction_priority(current, t, node_prio,
						    target_node->inherit_rt);
			cmd = BR_TRANSACTION;
		} else {
			tr.target.ptr = 0;
			tr.cookie = 0;
			cmd = BR_REPLY;
		}
		tr.code = t->code;
		tr.flags = t->flags;
		tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			tr.sender_pid = task_tgid_nr_ns(sender,
							task_active_pid_ns(current));
		} else {
			tr.sender_pid = 0;
		}

		tr.data_size = t->buffer->data_size;
		tr.offsets_size = t->buffer->offsets_size;
		tr.data.ptr.buffer = (binder_uintptr_t)
			((uintptr_t)t->buffer->data +
			binder_alloc_get_user_buffer_offset(&proc->alloc));
		tr.data.ptr.offsets = tr.data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));

		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "put_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, sizeof(tr))) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "copy_to_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(tr);

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	binder_inner_proc_lock(proc);
	if (proc->requested_threads == 0 &&
	    list_empty(&thread->proc->waiting_threads) &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to
					    * spawn a new thread if we leave
					    * this out */) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}

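/*
 * Note on thread-pool sizing (added commentary): the tail of
 * binder_thread_read() above writes BR_SPAWN_LOOPER over the leading
 * BR_NOOP when no spawn is pending, no thread is waiting, and
 * requested_threads_started is still below max_threads. User space is
 * expected to spawn a thread that answers with BC_REGISTER_LOOPER,
 * which decrements requested_threads again in binder_thread_write().
 */
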
static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;

	while (1) {
		w = binder_dequeue_work_head(proc, list);
		if (!w)
			return;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);

			binder_cleanup_transaction(t, "process died.",
						   BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				     "undelivered TRANSACTION_ERROR: %u\n",
				     e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				     "undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				     "undelivered death notification, %016llx\n",
				     (u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}
}

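/*
 * Threads of a process live in proc->threads, an rbtree keyed by
 * thread pid. Lookup and insertion share binder_get_thread_ilocked()
 * below: binder_get_thread() first probes with new_thread == NULL and
 * only allocates on a miss, so the allocation happens outside the
 * inner lock and a racing sibling cannot insert the same pid twice.
 */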
static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	get_task_struct(current);
	thread->task = current;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}

static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}

static void binder_free_proc(struct binder_proc *proc)
{
	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}

static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	put_task_struct(thread->task);
	kfree(thread);
}

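/*
 * Lifetime note (added commentary): binder_thread_release() takes a
 * tmp_ref on both the proc and the thread so the structures survive
 * their removal from proc->threads; the final
 * binder_thread_dec_tmpref() frees the thread via binder_free_thread(),
 * which in turn drops the proc reference taken here.
 */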
static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
	}
	binder_inner_proc_unlock(thread->proc);

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}

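/*
 * poll() support (added commentary): BINDER_LOOPER_STATE_POLL records
 * that this thread may be sleeping in poll_wait() on thread->wait
 * rather than in binder_wait_for_work(), so wakeup paths must signal
 * the thread's own waitqueue even for process-wide work.
 */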
static unsigned int binder_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	bool wait_for_proc_work;

	thread = binder_get_thread(proc);
	if (!thread)
		return POLLERR;

	binder_inner_proc_lock(thread->proc);
	thread->looper |= BINDER_LOOPER_STATE_POLL;
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);

	binder_inner_proc_unlock(thread->proc);

	poll_wait(filp, &thread->wait, wait);

	if (binder_has_work(thread, wait_for_proc_work))
		return POLLIN;

	return 0;
}

static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}

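/*
 * A minimal user-space round trip through binder_ioctl_write_read()
 * above (illustrative sketch; error handling omitted, names are
 * hypothetical):
 *
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)write_buf,
 *		.write_size = write_len,
 *		.read_buffer = (binder_uintptr_t)read_buf,
 *		.read_size = sizeof(read_buf),
 *	};
 *	if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) == 0)
 *		consume(read_buf, bwr.read_consumed);
 *
 * On a write-side failure the partially updated bwr is copied back
 * with read_consumed forced to 0, so the caller can still inspect
 * write_consumed.
 */
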
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, NULL);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}

static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
				struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;

	memset(info, 0, sizeof(*info));

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
			info->ptr = node->ptr;
			info->cookie = node->cookie;
			info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}

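/*
 * binder_ioctl_get_node_debug_info() above returns the first node with
 * ptr strictly greater than the ptr passed in, so user space can
 * enumerate a process's nodes by looping until ptr comes back as 0
 * (illustrative, hypothetical user-space code):
 *
 *	struct binder_node_debug_info info = { .ptr = 0 };
 *	do {
 *		ioctl(binder_fd, BINDER_GET_NODE_DEBUG_INFO, &info);
 *	} while (info.ptr != 0);
 */
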
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		int max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_DEBUG_INFO: {
		struct binder_node_debug_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}

static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}

static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}

static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};

static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
	if (ret)
		return ret;
	proc->files = get_files_struct(current);
	return 0;

err_bad_arg:
	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}

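/*
 * Summary of the mmap() constraints enforced above (added commentary):
 * the mapping is capped at 4 MB, writable mappings are rejected via
 * FORBIDDEN_MMAP_FLAGS, VM_MAYWRITE is cleared so the mapping cannot
 * be made writable later, and VM_DONTCOPY keeps a fork()ed child from
 * inheriting it; the backing pages themselves are managed by
 * binder_alloc.
 */
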
4881static int binder_open(struct inode *nodp, struct file *filp)
4882{
4883 struct binder_proc *proc;
bfd99b42 4884 struct binder_device *binder_dev;
355b0502
GKH
4885
4886 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
4887 current->group_leader->pid, current->pid);
4888
4889 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
4890 if (proc == NULL)
4891 return -ENOMEM;
4892 spin_lock_init(&proc->inner_lock);
4893 spin_lock_init(&proc->outer_lock);
4894 get_task_struct(current->group_leader);
4895 proc->tsk = current->group_leader;
4896 INIT_LIST_HEAD(&proc->todo);
4897 if (binder_supported_policy(current->policy)) {
4898 proc->default_priority.sched_policy = current->policy;
4899 proc->default_priority.prio = current->normal_prio;
4900 } else {
4901 proc->default_priority.sched_policy = SCHED_NORMAL;
4902 proc->default_priority.prio = NICE_TO_PRIO(0);
4903 }
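	/*
	 * Worked example, assuming a stock MAX_RT_PRIO of 100: a
	 * SCHED_NORMAL caller at nice 0 records prio
	 * NICE_TO_PRIO(0) == 100 + 0 + 20 == 120, while a caller with
	 * an unsupported policy falls back to the same
	 * SCHED_NORMAL/120 default.
	 */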
4904
4905 binder_dev = container_of(filp->private_data, struct binder_device,
4906 miscdev);
4907 proc->context = &binder_dev->context;
4908 binder_alloc_init(&proc->alloc);
4909
4910 binder_stats_created(BINDER_STAT_PROC);
4911 proc->pid = current->group_leader->pid;
4912 INIT_LIST_HEAD(&proc->delivered_death);
4913 INIT_LIST_HEAD(&proc->waiting_threads);
4914 filp->private_data = proc;
4915
4916 mutex_lock(&binder_procs_lock);
4917 hlist_add_head(&proc->proc_node, &binder_procs);
4918 mutex_unlock(&binder_procs_lock);
4919
4920 if (binder_debugfs_dir_entry_proc) {
4921 char strbuf[11];
4922
4923 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
4924 /*
4925 * proc debug entries are shared between contexts, so
4926 * this will fail if the process tries to open the driver
4927 * again with a different context. The printing code will
4928 * print all contexts that a given PID has anyway, so this
4929 * is not a problem.
4930 */
4931 proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
4932 binder_debugfs_dir_entry_proc,
4933 (void *)(unsigned long)proc->pid,
4934 &binder_proc_fops);
4935 }
4936
4937 return 0;
4938}
4939
4940static int binder_flush(struct file *filp, fl_owner_t id)
4941{
4942 struct binder_proc *proc = filp->private_data;
4943
4944 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
4945
4946 return 0;
4947}
4948
4949static void binder_deferred_flush(struct binder_proc *proc)
4950{
4951 struct rb_node *n;
4952 int wake_count = 0;
4953
4954 binder_inner_proc_lock(proc);
4955 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
4956 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
4957
4958 thread->looper_need_return = true;
4959 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
4960 wake_up_interruptible(&thread->wait);
4961 wake_count++;
4962 }
4963 }
4964 binder_inner_proc_unlock(proc);
4965
4966 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4967 "binder_flush: %d woke %d threads\n", proc->pid,
4968 wake_count);
4969}
4970
4971static int binder_release(struct inode *nodp, struct file *filp)
4972{
4973 struct binder_proc *proc = filp->private_data;
4974
4975 debugfs_remove(proc->debugfs_entry);
4976 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
4977
4978 return 0;
4979}
4980
4981static int binder_node_release(struct binder_node *node, int refs)
4982{
4983 struct binder_ref *ref;
4984 int death = 0;
4985 struct binder_proc *proc = node->proc;
4986
4987 binder_release_work(proc, &node->async_todo);
4988
4989 binder_node_lock(node);
4990 binder_inner_proc_lock(proc);
4991 binder_dequeue_work_ilocked(&node->work);
4992 /*
4993 * The caller must have taken a temporary ref on the node.
4994 */
4995 BUG_ON(!node->tmp_refs);
4996 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
4997 binder_inner_proc_unlock(proc);
4998 binder_node_unlock(node);
4999 binder_free_node(node);
5000
5001 return refs;
5002 }
5003
5004 node->proc = NULL;
5005 node->local_strong_refs = 0;
5006 node->local_weak_refs = 0;
5007 binder_inner_proc_unlock(proc);
5008
5009 spin_lock(&binder_dead_nodes_lock);
5010 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5011 spin_unlock(&binder_dead_nodes_lock);
5012
5013 hlist_for_each_entry(ref, &node->refs, node_entry) {
5014 refs++;
5015 /*
5016 * Need the node lock to synchronize
5017 * with new notification requests and the
5018 * inner lock to synchronize with queued
5019 * death notifications.
5020 */
5021 binder_inner_proc_lock(ref->proc);
5022 if (!ref->death) {
5023 binder_inner_proc_unlock(ref->proc);
5024 continue;
5025 }
5026
5027 death++;
5028
5029 BUG_ON(!list_empty(&ref->death->work.entry));
5030 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5031 binder_enqueue_work_ilocked(&ref->death->work,
5032 &ref->proc->todo);
5033 binder_wakeup_proc_ilocked(ref->proc);
5034 binder_inner_proc_unlock(ref->proc);
5035 }
5036
5037 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5038 "node %d now dead, refs %d, death %d\n",
5039 node->debug_id, refs, death);
5040 binder_node_unlock(node);
5041 binder_put_node(node);
5042
5043 return refs;
5044}
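/*
 * Note the two exits above: a node with no remaining refs is freed on
 * the spot, while a node that still has refs is parked on
 * binder_dead_nodes and each holder that registered a death
 * notification is queued a BINDER_WORK_DEAD_BINDER item.
 */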
5045
5046static void binder_deferred_release(struct binder_proc *proc)
5047{
5048 struct binder_context *context = proc->context;
5049 struct rb_node *n;
5050 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5051
5052 BUG_ON(proc->files);
5053
5054 mutex_lock(&binder_procs_lock);
5055 hlist_del(&proc->proc_node);
5056 mutex_unlock(&binder_procs_lock);
5057
5058 mutex_lock(&context->context_mgr_node_lock);
5059 if (context->binder_context_mgr_node &&
5060 context->binder_context_mgr_node->proc == proc) {
5061 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5062 "%s: %d context_mgr_node gone\n",
5063 __func__, proc->pid);
5064 context->binder_context_mgr_node = NULL;
5065 }
5066 mutex_unlock(&context->context_mgr_node_lock);
5067 binder_inner_proc_lock(proc);
5068 /*
5069 * Make sure proc stays alive after we
5070 * remove all the threads
5071 */
5072 proc->tmp_ref++;
5073
5074 proc->is_dead = true;
5075 threads = 0;
5076 active_transactions = 0;
5077 while ((n = rb_first(&proc->threads))) {
5078 struct binder_thread *thread;
5079
5080 thread = rb_entry(n, struct binder_thread, rb_node);
5081 binder_inner_proc_unlock(proc);
5082 threads++;
5083 active_transactions += binder_thread_release(proc, thread);
5084 binder_inner_proc_lock(proc);
5085 }
5086
5087 nodes = 0;
5088 incoming_refs = 0;
5089 while ((n = rb_first(&proc->nodes))) {
5090 struct binder_node *node;
5091
5092 node = rb_entry(n, struct binder_node, rb_node);
5093 nodes++;
5094 /*
5095 * take a temporary ref on the node before
5096 * calling binder_node_release() which will either
5097 * kfree() the node or call binder_put_node()
5098 */
5099 binder_inc_node_tmpref_ilocked(node);
5100 rb_erase(&node->rb_node, &proc->nodes);
5101 binder_inner_proc_unlock(proc);
5102 incoming_refs = binder_node_release(node, incoming_refs);
5103 binder_inner_proc_lock(proc);
5104 }
5105 binder_inner_proc_unlock(proc);
5106
5107 outgoing_refs = 0;
5108 binder_proc_lock(proc);
5109 while ((n = rb_first(&proc->refs_by_desc))) {
5110 struct binder_ref *ref;
5111
5112 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5113 outgoing_refs++;
5114 binder_cleanup_ref_olocked(ref);
5115 binder_proc_unlock(proc);
5116 binder_free_ref(ref);
5117 binder_proc_lock(proc);
5118 }
5119 binder_proc_unlock(proc);
5120
5121 binder_release_work(proc, &proc->todo);
5122 binder_release_work(proc, &proc->delivered_death);
5123
5124 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5125 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5126 __func__, proc->pid, threads, nodes, incoming_refs,
5127 outgoing_refs, active_transactions);
5128
5129 binder_proc_dec_tmpref(proc);
5130}
5131
5132static void binder_deferred_func(struct work_struct *work)
5133{
5134 struct binder_proc *proc;
5135 struct files_struct *files;
5136
5137 int defer;
5138
5139 do {
5140 mutex_lock(&binder_deferred_lock);
5141 if (!hlist_empty(&binder_deferred_list)) {
5142 proc = hlist_entry(binder_deferred_list.first,
5143 struct binder_proc, deferred_work_node);
5144 hlist_del_init(&proc->deferred_work_node);
5145 defer = proc->deferred_work;
5146 proc->deferred_work = 0;
5147 } else {
5148 proc = NULL;
5149 defer = 0;
5150 }
5151 mutex_unlock(&binder_deferred_lock);
5152
5153 files = NULL;
5154 if (defer & BINDER_DEFERRED_PUT_FILES) {
5155 files = proc->files;
5156 if (files)
5157 proc->files = NULL;
5158 }
5159
5160 if (defer & BINDER_DEFERRED_FLUSH)
5161 binder_deferred_flush(proc);
5162
5163 if (defer & BINDER_DEFERRED_RELEASE)
5164 binder_deferred_release(proc); /* frees proc */
5165
5166 if (files)
5167 put_files_struct(files);
5168 } while (proc);
5169}
5170static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5171
5172static void
5173binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5174{
5175 mutex_lock(&binder_deferred_lock);
5176 proc->deferred_work |= defer;
5177 if (hlist_unhashed(&proc->deferred_work_node)) {
5178 hlist_add_head(&proc->deferred_work_node,
5179 &binder_deferred_list);
5180 queue_work(binder_deferred_workqueue, &binder_deferred_work);
5181 }
5182 mutex_unlock(&binder_deferred_lock);
5183}
5184
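/*
 * All deferred teardown funnels through binder_defer_work(); e.g. the
 * flush path above is simply:
 *
 *    binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
 *
 * and the single-threaded binder_deferred_workqueue drains the list
 * one proc at a time in binder_deferred_func().
 */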
5185static void print_binder_transaction_ilocked(struct seq_file *m,
5186 struct binder_proc *proc,
5187 const char *prefix,
5188 struct binder_transaction *t)
5189{
5190 struct binder_proc *to_proc;
5191 struct binder_buffer *buffer = t->buffer;
5192
5193 spin_lock(&t->lock);
5194 to_proc = t->to_proc;
5195 seq_printf(m,
5196 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
5197 prefix, t->debug_id, t,
5198 t->from ? t->from->proc->pid : 0,
5199 t->from ? t->from->pid : 0,
5200 to_proc ? to_proc->pid : 0,
5201 t->to_thread ? t->to_thread->pid : 0,
5202 t->code, t->flags, t->priority.sched_policy,
5203 t->priority.prio, t->need_reply);
5204 spin_unlock(&t->lock);
5205
5206 if (proc != to_proc) {
5207 /*
5208 * Can only safely deref buffer if we are holding the
5209 * correct proc inner lock for this node
5210 */
5211 seq_puts(m, "\n");
5212 return;
5213 }
5214
5215 if (buffer == NULL) {
5216 seq_puts(m, " buffer free\n");
5217 return;
5218 }
5219 if (buffer->target_node)
5220 seq_printf(m, " node %d", buffer->target_node->debug_id);
5221 seq_printf(m, " size %zd:%zd data %p\n",
5222 buffer->data_size, buffer->offsets_size,
5223 buffer->data);
5224}
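/*
 * Example of a line the function above produces (all values invented
 * for illustration):
 *
 *    outgoing transaction 1234: ffffffc012345678 from 310:315 to 512:0 code 1f flags 10 pri 0:120 r1 node 77 size 128:8 data ffffff8087654321
 */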
5225
5226static void print_binder_work_ilocked(struct seq_file *m,
5227 struct binder_proc *proc,
5228 const char *prefix,
5229 const char *transaction_prefix,
5230 struct binder_work *w)
5231{
5232 struct binder_node *node;
5233 struct binder_transaction *t;
5234
5235 switch (w->type) {
5236 case BINDER_WORK_TRANSACTION:
5237 t = container_of(w, struct binder_transaction, work);
5238 print_binder_transaction_ilocked(
5239 m, proc, transaction_prefix, t);
5240 break;
5241 case BINDER_WORK_RETURN_ERROR: {
5242 struct binder_error *e = container_of(
5243 w, struct binder_error, work);
5244
5245 seq_printf(m, "%stransaction error: %u\n",
5246 prefix, e->cmd);
5247 } break;
5248 case BINDER_WORK_TRANSACTION_COMPLETE:
5249 seq_printf(m, "%stransaction complete\n", prefix);
5250 break;
5251 case BINDER_WORK_NODE:
5252 node = container_of(w, struct binder_node, work);
5253 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5254 prefix, node->debug_id,
5255 (u64)node->ptr, (u64)node->cookie);
5256 break;
5257 case BINDER_WORK_DEAD_BINDER:
5258 seq_printf(m, "%shas dead binder\n", prefix);
5259 break;
5260 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5261 seq_printf(m, "%shas cleared dead binder\n", prefix);
5262 break;
5263 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5264 seq_printf(m, "%shas cleared death notification\n", prefix);
5265 break;
5266 default:
5267 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5268 break;
5269 }
5270}
5271
5272static void print_binder_thread_ilocked(struct seq_file *m,
5273 struct binder_thread *thread,
5274 int print_always)
5275{
5276 struct binder_transaction *t;
5277 struct binder_work *w;
5278 size_t start_pos = m->count;
5279 size_t header_pos;
5280
5281 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
5282 thread->pid, thread->looper,
5283 thread->looper_need_return,
5284 atomic_read(&thread->tmp_ref));
5285 header_pos = m->count;
5286 t = thread->transaction_stack;
5287 while (t) {
5288 if (t->from == thread) {
5289 print_binder_transaction_ilocked(m, thread->proc,
5290 " outgoing transaction", t);
5291 t = t->from_parent;
5292 } else if (t->to_thread == thread) {
5293 print_binder_transaction_ilocked(m, thread->proc,
5294 " incoming transaction", t);
5295 t = t->to_parent;
5296 } else {
5297 print_binder_transaction_ilocked(m, thread->proc,
5298 " bad transaction", t);
5299 t = NULL;
5300 }
5301 }
5302 list_for_each_entry(w, &thread->todo, entry) {
5303 print_binder_work_ilocked(m, thread->proc, " ",
5304 " pending transaction", w);
5305 }
5306 if (!print_always && m->count == header_pos)
5307 m->count = start_pos;
5308}
5309
5310static void print_binder_node_nilocked(struct seq_file *m,
5311 struct binder_node *node)
5312{
5313 struct binder_ref *ref;
5314 struct binder_work *w;
5315 int count;
5316
5317 count = 0;
5318 hlist_for_each_entry(ref, &node->refs, node_entry)
5319 count++;
5320
5321 seq_printf(m, " node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5322 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5323 node->sched_policy, node->min_priority,
5324 node->has_strong_ref, node->has_weak_ref,
5325 node->local_strong_refs, node->local_weak_refs,
5326 node->internal_strong_refs, count, node->tmp_refs);
5327 if (count) {
5328 seq_puts(m, " proc");
5329 hlist_for_each_entry(ref, &node->refs, node_entry)
5330 seq_printf(m, " %d", ref->proc->pid);
5331 }
5332 seq_puts(m, "\n");
5333 if (node->proc) {
5334 list_for_each_entry(w, &node->async_todo, entry)
5335 print_binder_work_ilocked(m, node->proc, " ",
5336 " pending async transaction", w);
5337 }
5338}
5339
5340static void print_binder_ref_olocked(struct seq_file *m,
5341 struct binder_ref *ref)
5342{
5343 binder_node_lock(ref->node);
5344 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5345 ref->data.debug_id, ref->data.desc,
5346 ref->node->proc ? "" : "dead ",
5347 ref->node->debug_id, ref->data.strong,
5348 ref->data.weak, ref->death);
5349 binder_node_unlock(ref->node);
5350}
5351
5352static void print_binder_proc(struct seq_file *m,
5353 struct binder_proc *proc, int print_all)
5354{
5355 struct binder_work *w;
5356 struct rb_node *n;
5357 size_t start_pos = m->count;
5358 size_t header_pos;
5359 struct binder_node *last_node = NULL;
5360
5361 seq_printf(m, "proc %d\n", proc->pid);
5362 seq_printf(m, "context %s\n", proc->context->name);
5363 header_pos = m->count;
5364
5365 binder_inner_proc_lock(proc);
5366 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5367 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5368 rb_node), print_all);
5369
5370 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5371 struct binder_node *node = rb_entry(n, struct binder_node,
5372 rb_node);
5373 /*
5374 * take a temporary reference on the node so it
5375 * survives and isn't removed from the tree
5376 * while we print it.
5377 */
5378 binder_inc_node_tmpref_ilocked(node);
5379 /* Need to drop inner lock to take node lock */
5380 binder_inner_proc_unlock(proc);
5381 if (last_node)
5382 binder_put_node(last_node);
5383 binder_node_inner_lock(node);
5384 print_binder_node_nilocked(m, node);
5385 binder_node_inner_unlock(node);
5386 last_node = node;
5387 binder_inner_proc_lock(proc);
5388 }
5389 binder_inner_proc_unlock(proc);
5390 if (last_node)
5391 binder_put_node(last_node);
5392
5393 if (print_all) {
5394 binder_proc_lock(proc);
5395 for (n = rb_first(&proc->refs_by_desc);
5396 n != NULL;
5397 n = rb_next(n))
5398 print_binder_ref_olocked(m, rb_entry(n,
5399 struct binder_ref,
5400 rb_node_desc));
5401 binder_proc_unlock(proc);
5402 }
5403 binder_alloc_print_allocated(m, &proc->alloc);
5404 binder_inner_proc_lock(proc);
5405 list_for_each_entry(w, &proc->todo, entry)
5406 print_binder_work_ilocked(m, proc, " ",
5407 " pending transaction", w);
5408 list_for_each_entry(w, &proc->delivered_death, entry) {
5409 seq_puts(m, " has delivered dead binder\n");
5410 break;
5411 }
5412 binder_inner_proc_unlock(proc);
5413 if (!print_all && m->count == header_pos)
5414 m->count = start_pos;
5415}
5416
5417static const char * const binder_return_strings[] = {
5418 "BR_ERROR",
5419 "BR_OK",
5420 "BR_TRANSACTION",
5421 "BR_REPLY",
5422 "BR_ACQUIRE_RESULT",
5423 "BR_DEAD_REPLY",
5424 "BR_TRANSACTION_COMPLETE",
5425 "BR_INCREFS",
5426 "BR_ACQUIRE",
5427 "BR_RELEASE",
5428 "BR_DECREFS",
5429 "BR_ATTEMPT_ACQUIRE",
5430 "BR_NOOP",
5431 "BR_SPAWN_LOOPER",
5432 "BR_FINISHED",
5433 "BR_DEAD_BINDER",
5434 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5435 "BR_FAILED_REPLY"
5436};
5437
5438static const char * const binder_command_strings[] = {
5439 "BC_TRANSACTION",
5440 "BC_REPLY",
5441 "BC_ACQUIRE_RESULT",
5442 "BC_FREE_BUFFER",
5443 "BC_INCREFS",
5444 "BC_ACQUIRE",
5445 "BC_RELEASE",
5446 "BC_DECREFS",
5447 "BC_INCREFS_DONE",
5448 "BC_ACQUIRE_DONE",
5449 "BC_ATTEMPT_ACQUIRE",
5450 "BC_REGISTER_LOOPER",
5451 "BC_ENTER_LOOPER",
5452 "BC_EXIT_LOOPER",
5453 "BC_REQUEST_DEATH_NOTIFICATION",
5454 "BC_CLEAR_DEATH_NOTIFICATION",
5455 "BC_DEAD_BINDER_DONE",
5456 "BC_TRANSACTION_SG",
5457 "BC_REPLY_SG",
5458};
5459
5460static const char * const binder_objstat_strings[] = {
5461 "proc",
5462 "thread",
5463 "node",
5464 "ref",
5465 "death",
5466 "transaction",
5467 "transaction_complete"
5468};
5469
5470static void print_binder_stats(struct seq_file *m, const char *prefix,
5471 struct binder_stats *stats)
5472{
5473 int i;
5474
5475 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5476 ARRAY_SIZE(binder_command_strings));
5477 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5478 int temp = atomic_read(&stats->bc[i]);
5479
5480 if (temp)
5481 seq_printf(m, "%s%s: %d\n", prefix,
5482 binder_command_strings[i], temp);
5483 }
5484
5485 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5486 ARRAY_SIZE(binder_return_strings));
5487 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5488 int temp = atomic_read(&stats->br[i]);
5489
5490 if (temp)
5491 seq_printf(m, "%s%s: %d\n", prefix,
5492 binder_return_strings[i], temp);
5493 }
5494
5495 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5496 ARRAY_SIZE(binder_objstat_strings));
5497 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5498 ARRAY_SIZE(stats->obj_deleted));
5499 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5500 int created = atomic_read(&stats->obj_created[i]);
5501 int deleted = atomic_read(&stats->obj_deleted[i]);
5502
5503 if (created || deleted)
5504 seq_printf(m, "%s%s: active %d total %d\n",
5505 prefix,
5506 binder_objstat_strings[i],
5507 created - deleted,
5508 created);
5509 }
5510}
5511
5512static void print_binder_proc_stats(struct seq_file *m,
5513 struct binder_proc *proc)
5514{
5515 struct binder_work *w;
5516 struct binder_thread *thread;
5517 struct rb_node *n;
5518 int count, strong, weak, ready_threads;
5519 size_t free_async_space =
5520 binder_alloc_get_free_async_space(&proc->alloc);
5521
5522 seq_printf(m, "proc %d\n", proc->pid);
5523 seq_printf(m, "context %s\n", proc->context->name);
5524 count = 0;
5525 ready_threads = 0;
5526 binder_inner_proc_lock(proc);
5527 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5528 count++;
5529
5530 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5531 ready_threads++;
5532
5533 seq_printf(m, " threads: %d\n", count);
5534 seq_printf(m, " requested threads: %d+%d/%d\n"
5535 " ready threads %d\n"
5536 " free async space %zd\n", proc->requested_threads,
5537 proc->requested_threads_started, proc->max_threads,
5538 ready_threads,
5539 free_async_space);
5540 count = 0;
5541 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5542 count++;
5543 binder_inner_proc_unlock(proc);
5544 seq_printf(m, " nodes: %d\n", count);
5545 count = 0;
5546 strong = 0;
5547 weak = 0;
5548 binder_proc_lock(proc);
5549 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5550 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5551 rb_node_desc);
5552 count++;
5553 strong += ref->data.strong;
5554 weak += ref->data.weak;
5555 }
5556 binder_proc_unlock(proc);
5557 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
5558
5559 count = binder_alloc_get_allocated_count(&proc->alloc);
5560 seq_printf(m, " buffers: %d\n", count);
5561
5562 count = 0;
5563 binder_inner_proc_lock(proc);
5564 list_for_each_entry(w, &proc->todo, entry) {
5565 if (w->type == BINDER_WORK_TRANSACTION)
5566 count++;
5567 }
5568 binder_inner_proc_unlock(proc);
5569 seq_printf(m, " pending transactions: %d\n", count);
5570
5571 print_binder_stats(m, " ", &proc->stats);
5572}
5573
5574
5575static int binder_state_show(struct seq_file *m, void *unused)
5576{
5577 struct binder_proc *proc;
5578 struct binder_node *node;
5579 struct binder_node *last_node = NULL;
5580
5581 seq_puts(m, "binder state:\n");
5582
5583 spin_lock(&binder_dead_nodes_lock);
5584 if (!hlist_empty(&binder_dead_nodes))
5585 seq_puts(m, "dead nodes:\n");
5586 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5587 /*
5588 * take a temporary reference on the node so it
5589 * survives and isn't removed from the list
5590 * while we print it.
5591 */
5592 node->tmp_refs++;
5593 spin_unlock(&binder_dead_nodes_lock);
5594 if (last_node)
5595 binder_put_node(last_node);
5596 binder_node_lock(node);
5597 print_binder_node_nilocked(m, node);
5598 binder_node_unlock(node);
5599 last_node = node;
5600 spin_lock(&binder_dead_nodes_lock);
5601 }
5602 spin_unlock(&binder_dead_nodes_lock);
5603 if (last_node)
5604 binder_put_node(last_node);
5605
5606 mutex_lock(&binder_procs_lock);
5607 hlist_for_each_entry(proc, &binder_procs, proc_node)
5608 print_binder_proc(m, proc, 1);
5609 mutex_unlock(&binder_procs_lock);
5610
5611 return 0;
5612}
5613
5614static int binder_stats_show(struct seq_file *m, void *unused)
5615{
5616 struct binder_proc *proc;
5617
5618 seq_puts(m, "binder stats:\n");
5619
5620 print_binder_stats(m, "", &binder_stats);
5621
5622 mutex_lock(&binder_procs_lock);
5623 hlist_for_each_entry(proc, &binder_procs, proc_node)
5624 print_binder_proc_stats(m, proc);
5625 mutex_unlock(&binder_procs_lock);
5626
5627 return 0;
5628}
5629
5630static int binder_transactions_show(struct seq_file *m, void *unused)
5631{
5632 struct binder_proc *proc;
5633
5634 seq_puts(m, "binder transactions:\n");
5635 mutex_lock(&binder_procs_lock);
5636 hlist_for_each_entry(proc, &binder_procs, proc_node)
5637 print_binder_proc(m, proc, 0);
5638 mutex_unlock(&binder_procs_lock);
5639
5640 return 0;
5641}
5642
5643static int binder_proc_show(struct seq_file *m, void *unused)
5644{
5645 struct binder_proc *itr;
5646 int pid = (unsigned long)m->private;
5647
5648 mutex_lock(&binder_procs_lock);
5649 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5650 if (itr->pid == pid) {
5651 seq_puts(m, "binder proc state:\n");
5652 print_binder_proc(m, itr, 1);
5653 }
5654 }
5655 mutex_unlock(&binder_procs_lock);
5656
5657 return 0;
5658}
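/*
 * Minimal user-space reader for the entries above (a sketch; assumes
 * debugfs is mounted at /sys/kernel/debug):
 *
 *    #include <fcntl.h>
 *    #include <stdio.h>
 *    #include <unistd.h>
 *
 *    char buf[4096];
 *    int fd = open("/sys/kernel/debug/binder/state", O_RDONLY);
 *    ssize_t n = read(fd, buf, sizeof(buf) - 1);
 *    if (n > 0) {
 *            buf[n] = '\0';
 *            fputs(buf, stdout);
 *    }
 */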
5659
5660static void print_binder_transaction_log_entry(struct seq_file *m,
5661 struct binder_transaction_log_entry *e)
5662{
5663 int debug_id = READ_ONCE(e->debug_id_done);
5664 /*
5665 * read barrier to guarantee debug_id_done read before
5666 * we print the log values
5667 */
5668 smp_rmb();
5669 seq_printf(m,
5670 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5671 e->debug_id, (e->call_type == 2) ? "reply" :
5672 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5673 e->from_thread, e->to_proc, e->to_thread, e->context_name,
5674 e->to_node, e->target_handle, e->data_size, e->offsets_size,
5675 e->return_error, e->return_error_param,
5676 e->return_error_line);
5677 /*
5678 * read-barrier to guarantee read of debug_id_done after
5679 * done printing the fields of the entry
5680 */
5681 smp_rmb();
5682 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5683 "\n" : " (incomplete)\n");
5684}
5685
5686static int binder_transaction_log_show(struct seq_file *m, void *unused)
5687{
5688 struct binder_transaction_log *log = m->private;
5689 unsigned int log_cur = atomic_read(&log->cur);
5690 unsigned int count;
5691 unsigned int cur;
5692 int i;
5693
5694 count = log_cur + 1;
5695 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5696 0 : count % ARRAY_SIZE(log->entry);
5697 if (count > ARRAY_SIZE(log->entry) || log->full)
5698 count = ARRAY_SIZE(log->entry);
5699 for (i = 0; i < count; i++) {
5700 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5701
5702 print_binder_transaction_log_entry(m, &log->entry[index]);
5703 }
5704 return 0;
5705}
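/*
 * Worked example of the ring arithmetic above, assuming the usual
 * 32-entry log: after a wrap with cur == 40, count becomes 41, so
 * printing starts at index 41 % 32 == 9 and walks all 32 entries;
 * before the first wrap (say cur == 4 and !log->full), it starts at
 * index 0 and prints just 5 entries.
 */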
5706
5707static const struct file_operations binder_fops = {
5708 .owner = THIS_MODULE,
5709 .poll = binder_poll,
5710 .unlocked_ioctl = binder_ioctl,
5711 .compat_ioctl = binder_ioctl,
5712 .mmap = binder_mmap,
5713 .open = binder_open,
5714 .flush = binder_flush,
5715 .release = binder_release,
5716};
5717
5718BINDER_DEBUG_ENTRY(state);
5719BINDER_DEBUG_ENTRY(stats);
5720BINDER_DEBUG_ENTRY(transactions);
5721BINDER_DEBUG_ENTRY(transaction_log);
5722
5723static int __init init_binder_device(const char *name)
5724{
5725 int ret;
5726 struct binder_device *binder_device;
5727
5728 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5729 if (!binder_device)
5730 return -ENOMEM;
5731
5732 binder_device->miscdev.fops = &binder_fops;
5733 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5734 binder_device->miscdev.name = name;
5735
5736 binder_device->context.binder_context_mgr_uid = INVALID_UID;
5737 binder_device->context.name = name;
5738 mutex_init(&binder_device->context.context_mgr_node_lock);
5739
5740 ret = misc_register(&binder_device->miscdev);
5741 if (ret < 0) {
5742 kfree(binder_device);
5743 return ret;
5744 }
5745
5746 hlist_add_head(&binder_device->hlist, &binder_devices);
5747
5748 return ret;
5749}
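/*
 * init_binder_device() runs once per comma-separated name in
 * binder_devices_param; with the common Android default of
 * "binder,hwbinder,vndbinder", binder_init() below registers three
 * misc devices that share binder_fops but each carry their own
 * binder_context.
 */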
5750
5751static int __init binder_init(void)
5752{
5753 int ret;
5754 char *device_name, *device_names;
5755 struct binder_device *device;
5756 struct hlist_node *tmp;
5757
5758 atomic_set(&binder_transaction_log.cur, ~0U);
5759 atomic_set(&binder_transaction_log_failed.cur, ~0U);
5760 binder_deferred_workqueue = create_singlethread_workqueue("binder");
5761 if (!binder_deferred_workqueue)
5762 return -ENOMEM;
5763
5764 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5765 if (binder_debugfs_dir_entry_root)
5766 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5767 binder_debugfs_dir_entry_root);
5768
5769 if (binder_debugfs_dir_entry_root) {
5770 debugfs_create_file("state",
5771 S_IRUGO,
5772 binder_debugfs_dir_entry_root,
5773 NULL,
5774 &binder_state_fops);
5775 debugfs_create_file("stats",
5776 S_IRUGO,
5777 binder_debugfs_dir_entry_root,
5778 NULL,
5779 &binder_stats_fops);
5780 debugfs_create_file("transactions",
5781 S_IRUGO,
5782 binder_debugfs_dir_entry_root,
5783 NULL,
5784 &binder_transactions_fops);
5785 debugfs_create_file("transaction_log",
5786 S_IRUGO,
5787 binder_debugfs_dir_entry_root,
5788 &binder_transaction_log,
5789 &binder_transaction_log_fops);
5790 debugfs_create_file("failed_transaction_log",
5791 S_IRUGO,
5792 binder_debugfs_dir_entry_root,
5793 &binder_transaction_log_failed,
5794 &binder_transaction_log_fops);
5795 }
5796
5797 /*
5798 * Copy the module_parameter string, because we don't want to
5799 * tokenize it in-place.
5800 */
5801 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
5802 if (!device_names) {
5803 ret = -ENOMEM;
5804 goto err_alloc_device_names_failed;
5805 }
5806 strcpy(device_names, binder_devices_param);
5807
5808 while ((device_name = strsep(&device_names, ","))) {
5809 ret = init_binder_device(device_name);
5810 if (ret)
5811 goto err_init_binder_device_failed;
5812 }
5813
5814 return ret;
5815
5816err_init_binder_device_failed:
5817 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
5818 misc_deregister(&device->miscdev);
5819 hlist_del(&device->hlist);
5820 kfree(device);
5821 }
5822err_alloc_device_names_failed:
5823 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
5824
5825 destroy_workqueue(binder_deferred_workqueue);
5826
5827 return ret;
5828}
5829
5830device_initcall(binder_init);
5831
5832#define CREATE_TRACE_POINTS
5833#include "binder_trace.h"
5834
5835MODULE_LICENSE("GPL v2");