source: G950FXXS5DSI1
[GitHub/exynos8895/android_kernel_samsung_universal8895.git] / drivers / android / binder.c
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock to be held on entry indicate the
 * required lock in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
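
/*
 * Illustrative sketch (not part of the driver): taking the three locks
 * in the documented order via the helpers defined later in this file.
 *
 *	binder_proc_lock(proc);			// 1) proc->outer_lock
 *	binder_node_lock(node);			// 2) node->lock
 *	binder_inner_proc_lock(proc);		// 3) proc->inner_lock
 *	// ... touch todo lists, node state, transaction_stack ...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */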

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"
#ifdef CONFIG_SAMSUNG_FREECESS
#include <linux/freecess.h>
#endif

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);
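
/*
 * For reference, a sketch of what BINDER_DEBUG_ENTRY(proc) above expands
 * to: one seq_file open helper plus a file_operations table per name.
 *
 *	static int binder_proc_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, binder_proc_show, inode->i_private);
 *	}
 *
 *	static const struct file_operations binder_proc_fops = {
 *		.owner = THIS_MODULE,
 *		.open = binder_proc_open,
 *		...
 *	};
 */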

/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_1K
#define SZ_1K 0x400
#endif

#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, S_IRUGO);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
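
/*
 * Usage sketch (illustrative): a debug print gated on one of the flags
 * above, prefixed via pr_fmt():
 *
 *	binder_debug(BINDER_DEBUG_THREADS,
 *		     "%d:%d looper state %x\n",
 *		     proc->pid, thread->pid, thread->looper);
 */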

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};
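
/*
 * Indexing sketch (illustrative): the br/bc arrays are indexed by the
 * ioctl command number of the return code or command, e.g.
 *
 *	atomic_inc(&binder_stats.br[_IOC_NR(BR_TRANSACTION_COMPLETE)]);
 *
 * so every BR_ and BC_ code gets its own counter slot.
 */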

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};
struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
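
/*
 * Ring-buffer sketch (illustrative): with ARRAY_SIZE(log->entry) == 32,
 * the monotonically increasing cur wraps via the modulo, so cur == 33
 * selects entry[33 % 32] == entry[1]; once cur has passed the array size,
 * log->full stays true. A reader is expected to pair the smp_wmb() above
 * with a read barrier before trusting a non-zero debug_id_done.
 */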

struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;

	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};

/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        and by @lock)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @sched_policy:         minimum scheduling policy for node
 *                        (invariant after initialized)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @txn_security_ctx:     require sender's security context
 *                        (invariant after initialized)
 * @inherit_rt:           inherit RT scheduling policy from caller
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 sched_policy:2;
		u8 inherit_rt:1;
		u8 accept_fds:1;
		u8 txn_security_ctx:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:         binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:   list entry for node->refs list in target node
 *                (protected by @node->lock)
 * @proc:         binder_proc containing ref
 * @node:         binder_node of target node. When cleaning up a
 *                ref for deletion in binder_cleanup_ref, a non-NULL
 *                @node indicates the node must be freed
 * @death:        pointer to death notification (ref_death) if requested
 *                (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};

/**
 * struct binder_priority - scheduler policy and priority
 * @sched_policy: scheduler policy
 * @prio:         [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
 *
 * The binder driver supports inheriting the following scheduler policies:
 * SCHED_NORMAL
 * SCHED_BATCH
 * SCHED_FIFO
 * SCHED_RR
 */
struct binder_priority {
	unsigned int sched_policy;
	int prio;
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (protected by @files_lock)
 * @files_lock:           mutex to protect @files
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct mutex files_lock;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	struct binder_priority default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @process_todo:         whether work in @todo should be processed
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 * @task:                 struct task_struct for this thread
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
	struct task_struct *task;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int code;
	unsigned int flags;
	struct binder_priority priority;
	struct binder_priority saved_priority;
	bool set_priority_called;
	kuid_t sender_euid;
	binder_uintptr_t security_ctx;
	/**
	 * @lock: protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
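
/*
 * Usage sketch (illustrative): the deferred variant is for work that must
 * not, by itself, mark the thread's todo queue ready; the normal variant
 * both queues and enables processing. The _ilocked call assumes the caller
 * already holds proc->inner_lock:
 *
 *	binder_enqueue_thread_work(thread, &t->work);		// queue + ready
 *	binder_enqueue_deferred_thread_work_ilocked(thread, w);	// queue only
 */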

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:         binder_proc associated with list
 * @list:         list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	unsigned long rlim_cur;
	unsigned long irqs;
	int ret;

	mutex_lock(&proc->files_lock);
	if (proc->files == NULL) {
		ret = -ESRCH;
		goto err;
	}
	if (!lock_task_sighand(proc->tsk, &irqs)) {
		ret = -EMFILE;
		goto err;
	}
	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
err:
	mutex_unlock(&proc->files_lock);
	return ret;
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	mutex_lock(&proc->files_lock);
	if (proc->files)
		__fd_install(proc->files, fd, file);
	mutex_unlock(&proc->files_lock);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	mutex_lock(&proc->files_lock);
	if (proc->files == NULL) {
		retval = -ESRCH;
		goto err;
	}
	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;
err:
	mutex_unlock(&proc->files_lock);
	return retval;
}

static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return: If there's a thread currently waiting for process work,
 *         returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static bool is_rt_policy(int policy)
{
	return policy == SCHED_FIFO || policy == SCHED_RR;
}

static bool is_fair_policy(int policy)
{
	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
}

static bool binder_supported_policy(int policy)
{
	return is_fair_policy(policy) || is_rt_policy(policy);
}

static int to_userspace_prio(int policy, int kernel_priority)
{
	if (is_fair_policy(policy))
		return PRIO_TO_NICE(kernel_priority);
	else
		return MAX_USER_RT_PRIO - 1 - kernel_priority;
}

static int to_kernel_prio(int policy, int user_priority)
{
	if (is_fair_policy(policy))
		return NICE_TO_PRIO(user_priority);
	else
		return MAX_USER_RT_PRIO - 1 - user_priority;
}
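
/*
 * Worked example (illustrative): for a fair policy, nice 0 maps to kernel
 * prio NICE_TO_PRIO(0) == 120 and nice -20 to 100; for an RT policy with
 * MAX_USER_RT_PRIO == 100, userspace rtprio 50 maps to kernel prio
 * 100 - 1 - 50 == 49. The two helpers are mutual inverses for a given
 * policy.
 */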

static void binder_do_set_priority(struct task_struct *task,
				   struct binder_priority desired,
				   bool verify)
{
	int priority; /* user-space prio value */
	bool has_cap_nice;
	unsigned int policy = desired.sched_policy;

	if (task->policy == policy && task->normal_prio == desired.prio)
		return;

	has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);

	priority = to_userspace_prio(policy, desired.prio);

	if (verify && is_rt_policy(policy) && !has_cap_nice) {
		long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);

		if (max_rtprio == 0) {
			policy = SCHED_NORMAL;
			priority = MIN_NICE;
		} else if (priority > max_rtprio) {
			priority = max_rtprio;
		}
	}

	if (verify && is_fair_policy(policy) && !has_cap_nice) {
		long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));

		if (min_nice > MAX_NICE) {
			binder_user_error("%d RLIMIT_NICE not set\n",
					  task->pid);
			return;
		} else if (priority < min_nice) {
			priority = min_nice;
		}
	}

	if (policy != desired.sched_policy ||
	    to_kernel_prio(policy, priority) != desired.prio)
		binder_debug(BINDER_DEBUG_PRIORITY_CAP,
			     "%d: priority %d not allowed, using %d instead\n",
			     task->pid, desired.prio,
			     to_kernel_prio(policy, priority));

	trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
				  to_kernel_prio(policy, priority),
				  desired.prio);

	/* Set the actual priority */
	if (task->policy != policy || is_rt_policy(policy)) {
		struct sched_param params;

		params.sched_priority = is_rt_policy(policy) ? priority : 0;

		sched_setscheduler_nocheck(task,
					   policy | SCHED_RESET_ON_FORK,
					   &params);
	}
	if (is_fair_policy(policy))
		set_user_nice(task, priority);
}

static void binder_set_priority(struct task_struct *task,
				struct binder_priority desired)
{
	binder_do_set_priority(task, desired, /* verify = */ true);
}

static void binder_restore_priority(struct task_struct *task,
				    struct binder_priority desired)
{
	binder_do_set_priority(task, desired, /* verify = */ false);
}

static void binder_transaction_priority(struct task_struct *task,
					struct binder_transaction *t,
					struct binder_priority node_prio,
					bool inherit_rt)
{
	struct binder_priority desired_prio = t->priority;

	if (t->set_priority_called)
		return;

	t->set_priority_called = true;
	t->saved_priority.sched_policy = task->policy;
	t->saved_priority.prio = task->normal_prio;

	if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
		desired_prio.prio = NICE_TO_PRIO(0);
		desired_prio.sched_policy = SCHED_NORMAL;
	}

	if (node_prio.prio < t->priority.prio ||
	    (node_prio.prio == t->priority.prio &&
	     node_prio.sched_policy == SCHED_FIFO)) {
		/*
		 * In case the minimum priority on the node is
		 * higher (lower value), use that priority. If
		 * the priority is the same, but the node uses
		 * SCHED_FIFO, prefer SCHED_FIFO, since it can
		 * run unbounded, unlike SCHED_RR.
		 */
		desired_prio = node_prio;
	}

	binder_set_priority(task, desired_prio);
}
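
/*
 * Worked example (illustrative): a SCHED_NORMAL caller at nice 0 (kernel
 * prio 120) sends to a node whose minimum is SCHED_FIFO kernel prio 98.
 * Since 98 < 120 (lower value == higher priority), the target thread runs
 * the transaction at the node's SCHED_FIFO minimum. With inherit_rt ==
 * false, an RT caller is first downgraded to SCHED_NORMAL, nice 0, before
 * the node minimum is applied.
 */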

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;
	s8 priority;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
		FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
	node->min_priority = to_kernel_prio(node->sched_policy, priority);
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->
				      binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			binder_dequeue_work_ilocked(&node->work);
			/*
			 * Note: this function is the only place where we queue
			 * directly to a thread->todo without using the
			 * corresponding binder_enqueue_thread_work() helper
			 * functions; in this case it's ok to not set the
			 * process_todo flag, since we know this node work will
			 * always be followed by other work that starts queue
			 * processing: in case of synchronous transactions, a
			 * BR_REPLY or BR_ERROR; in case of oneway
			 * transactions, a BR_TRANSACTION_COMPLETE.
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			/*
			 * See comment above
			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

TK
1559/**
1560 * binder_inc_node_tmpref() - take a temporary reference on node
1561 * @node: node to reference
1562 *
1563 * Take reference on node to prevent the node from being freed
f73f378b
TK
1564 * while referenced only by a local variable. The inner lock is
1565 * needed to serialize with the node work on the queue (which
1566 * isn't needed after the node is dead). If the node is dead
1567 * (node->proc is NULL), use binder_dead_nodes_lock to protect
1568 * node->tmp_refs against dead-node-only cases where the node
1569 * lock cannot be acquired (eg traversing the dead node list to
1570 * print nodes)
96dd75d9
TK
1571 */
1572static void binder_inc_node_tmpref(struct binder_node *node)
1573{
14c312e9 1574 binder_node_lock(node);
f73f378b
TK
1575 if (node->proc)
1576 binder_inner_proc_lock(node->proc);
1577 else
1578 spin_lock(&binder_dead_nodes_lock);
1579 binder_inc_node_tmpref_ilocked(node);
1580 if (node->proc)
1581 binder_inner_proc_unlock(node->proc);
1582 else
1583 spin_unlock(&binder_dead_nodes_lock);
14c312e9 1584 binder_node_unlock(node);
96dd75d9
TK
1585}
1586
1587/**
1588 * binder_dec_node_tmpref() - remove a temporary reference on node
1589 * @node: node to reference
1590 *
1591 * Release temporary reference on node taken via binder_inc_node_tmpref()
1592 */
1593static void binder_dec_node_tmpref(struct binder_node *node)
1594{
f73f378b
TK
1595 bool free_node;
1596
14c312e9
TK
1597 binder_node_inner_lock(node);
1598 if (!node->proc)
f73f378b 1599 spin_lock(&binder_dead_nodes_lock);
96dd75d9
TK
1600 node->tmp_refs--;
1601 BUG_ON(node->tmp_refs < 0);
f73f378b
TK
1602 if (!node->proc)
1603 spin_unlock(&binder_dead_nodes_lock);
96dd75d9
TK
1604 /*
1605 * Call binder_dec_node() to check if all refcounts are 0
1606 * and cleanup is needed. Calling with strong=0 and internal=1
1607 * causes no actual reference to be released in binder_dec_node().
1608 * If that changes, a change is needed here too.
1609 */
14c312e9
TK
1610 free_node = binder_dec_node_nilocked(node, 0, 1);
1611 binder_node_inner_unlock(node);
f73f378b
TK
1612 if (free_node)
1613 binder_free_node(node);
96dd75d9
TK
1614}
1615
1616static void binder_put_node(struct binder_node *node)
1617{
1618 binder_dec_node_tmpref(node);
1619}
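
/*
 * Usage sketch (illustrative): the tmpref pattern pins a node while it is
 * only referenced by a local variable:
 *
 *	node = binder_get_node(proc, ptr);	// takes a tmp_refs reference
 *	if (node) {
 *		// ... safe to use node here, even as it dies ...
 *		binder_put_node(node);		// drop ref; may free the node
 *	}
 */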

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

f7d87412 1644/**
6fcb2b9a 1645 * binder_get_ref_for_node_olocked() - get the ref associated with given node
f7d87412
TK
1646 * @proc: binder_proc that owns the ref
1647 * @node: binder_node of target
1648 * @new_ref: newly allocated binder_ref to be initialized or %NULL
1649 *
1650 * Look up the ref for the given node and return it if it exists
1651 *
1652 * If it doesn't exist and the caller provides a newly allocated
1653 * ref, initialize the fields of the newly allocated ref and insert
1654 * into the given proc rb_trees and node refs list.
1655 *
1656 * Return: the ref for node. It is possible that another thread
1657 * allocated/initialized the ref first in which case the
1658 * returned ref would be different than the passed-in
1659 * new_ref. new_ref must be kfree'd by the caller in
1660 * this case.
1661 */
6fcb2b9a
TK
1662static struct binder_ref *binder_get_ref_for_node_olocked(
1663 struct binder_proc *proc,
1664 struct binder_node *node,
1665 struct binder_ref *new_ref)
355b0502 1666{
f7d87412 1667 struct binder_context *context = proc->context;
355b0502
GKH
1668 struct rb_node **p = &proc->refs_by_node.rb_node;
1669 struct rb_node *parent = NULL;
f7d87412
TK
1670 struct binder_ref *ref;
1671 struct rb_node *n;
355b0502
GKH
1672
1673 while (*p) {
1674 parent = *p;
1675 ref = rb_entry(parent, struct binder_ref, rb_node_node);
1676
1677 if (node < ref->node)
1678 p = &(*p)->rb_left;
1679 else if (node > ref->node)
1680 p = &(*p)->rb_right;
1681 else
1682 return ref;
1683 }
f7d87412 1684 if (!new_ref)
355b0502 1685 return NULL;
f7d87412 1686
355b0502 1687 binder_stats_created(BINDER_STAT_REF);
f7d87412 1688 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
355b0502
GKH
1689 new_ref->proc = proc;
1690 new_ref->node = node;
1691 rb_link_node(&new_ref->rb_node_node, parent, p);
1692 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1693
f7d87412 1694 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
355b0502
GKH
1695 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
1696 ref = rb_entry(n, struct binder_ref, rb_node_desc);
f7d87412 1697 if (ref->data.desc > new_ref->data.desc)
355b0502 1698 break;
f7d87412 1699 new_ref->data.desc = ref->data.desc + 1;
355b0502
GKH
1700 }
1701
1702 p = &proc->refs_by_desc.rb_node;
1703 while (*p) {
1704 parent = *p;
1705 ref = rb_entry(parent, struct binder_ref, rb_node_desc);
1706
f7d87412 1707 if (new_ref->data.desc < ref->data.desc)
355b0502 1708 p = &(*p)->rb_left;
f7d87412 1709 else if (new_ref->data.desc > ref->data.desc)
355b0502
GKH
1710 p = &(*p)->rb_right;
1711 else
1712 BUG();
1713 }
1714 rb_link_node(&new_ref->rb_node_desc, parent, p);
1715 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
14c312e9
TK
1716
1717 binder_node_lock(node);
ce9b7747 1718 hlist_add_head(&new_ref->node_entry, &node->refs);
355b0502 1719
ce9b7747
TK
1720 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
1721 "%d new ref %d desc %d for node %d\n",
f7d87412 1722 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
ce9b7747 1723 node->debug_id);
14c312e9 1724 binder_node_unlock(node);
355b0502
GKH
1725 return new_ref;
1726}
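
/*
 * Worked example (illustrative): descriptors are handed out lowest-first
 * by the refs_by_desc scan above. If a proc already holds refs with
 * descs {0, 1, 2, 4}, a new ref starts at 1 (desc 0 is reserved for the
 * context manager), is bumped past 1 and 2, and lands on 3: the gap
 * before 4 is reused rather than appending 5.
 */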

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
        bool delete_node = false;

        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                     "%d delete ref %d desc %d for node %d\n",
                     ref->proc->pid, ref->data.debug_id, ref->data.desc,
                     ref->node->debug_id);

        rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
        rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

        binder_node_inner_lock(ref->node);
        if (ref->data.strong)
                binder_dec_node_nilocked(ref->node, 1, 1);

        hlist_del(&ref->node_entry);
        delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
        binder_node_inner_unlock(ref->node);
        /*
         * Clear ref->node unless we want the caller to free the node
         */
        if (!delete_node) {
                /*
                 * The caller uses ref->node to determine
                 * whether the node needs to be freed. Clear
                 * it since the node is still alive.
                 */
                ref->node = NULL;
        }

        if (ref->death) {
                binder_debug(BINDER_DEBUG_DEAD_BINDER,
                             "%d delete ref %d desc %d has death notification\n",
                             ref->proc->pid, ref->data.debug_id,
                             ref->data.desc);
                binder_dequeue_work(ref->proc, &ref->death->work);
                binder_stats_deleted(BINDER_STAT_DEATH);
        }
        binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref: ref to be incremented
 * @strong: if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
                                  struct list_head *target_list)
{
        int ret;

        if (strong) {
                if (ref->data.strong == 0) {
                        ret = binder_inc_node(ref->node, 1, 1, target_list);
                        if (ret)
                                return ret;
                }
                ref->data.strong++;
        } else {
                if (ref->data.weak == 0) {
                        ret = binder_inc_node(ref->node, 0, 1, target_list);
                        if (ret)
                                return ret;
                }
                ref->data.weak++;
        }
        return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref: ref to be decremented
 * @strong: if true, strong decrement, else weak
 *
 * Decrement the ref. @ref->proc->outer_lock must be held on entry.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
        if (strong) {
                if (ref->data.strong == 0) {
                        binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
                                          ref->proc->pid, ref->data.debug_id,
                                          ref->data.desc, ref->data.strong,
                                          ref->data.weak);
                        return false;
                }
                ref->data.strong--;
                if (ref->data.strong == 0)
                        binder_dec_node(ref->node, strong, 1);
        } else {
                if (ref->data.weak == 0) {
                        binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
                                          ref->proc->pid, ref->data.debug_id,
                                          ref->data.desc, ref->data.strong,
                                          ref->data.weak);
                        return false;
                }
                ref->data.weak--;
        }
        if (ref->data.strong == 0 && ref->data.weak == 0) {
                binder_cleanup_ref_olocked(ref);
                return true;
        }
        return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
                struct binder_proc *proc,
                u32 desc, bool need_strong_ref,
                struct binder_ref_data *rdata)
{
        struct binder_node *node;
        struct binder_ref *ref;

        binder_proc_lock(proc);
        ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
        if (!ref)
                goto err_no_ref;
        node = ref->node;
        /*
         * Take an implicit reference on the node to ensure
         * it stays alive until the call to binder_put_node()
         */
        binder_inc_node_tmpref(node);
        if (rdata)
                *rdata = ref->data;
        binder_proc_unlock(proc);

        return node;

err_no_ref:
        binder_proc_unlock(proc);
        return NULL;
}
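
/*
 * Illustrative sketch (not part of the driver): a hypothetical caller
 * resolving a handle to its node. binder_get_node_from_ref() takes an
 * implicit tmpref, so the node stays alive after the proc lock is
 * dropped and must be released with binder_put_node():
 */
static void __maybe_unused binder_node_lookup_sketch(struct binder_proc *proc,
                                                     u32 desc)
{
        struct binder_ref_data rdata;
        struct binder_node *node;

        node = binder_get_node_from_ref(proc, desc, true, &rdata);
        if (!node)
                return;                 /* no such handle, or only a weak ref */
        /* ... node is guaranteed alive here ... */
        binder_put_node(node);          /* drop the implicit tmpref */
}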

/**
 * binder_free_ref() - free the binder_ref
 * @ref: ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
        if (ref->node)
                binder_free_node(ref->node);
        kfree(ref->death);
        kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @increment: true=inc reference, false=dec reference
 * @strong: true=strong reference, false=weak reference
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
                uint32_t desc, bool increment, bool strong,
                struct binder_ref_data *rdata)
{
        int ret = 0;
        struct binder_ref *ref;
        bool delete_ref = false;

        binder_proc_lock(proc);
        ref = binder_get_ref_olocked(proc, desc, strong);
        if (!ref) {
                ret = -EINVAL;
                goto err_no_ref;
        }
        if (increment)
                ret = binder_inc_ref_olocked(ref, strong, NULL);
        else
                delete_ref = binder_dec_ref_olocked(ref, strong);

        if (rdata)
                *rdata = ref->data;
        binder_proc_unlock(proc);

        if (delete_ref)
                binder_free_ref(ref);
        return ret;

err_no_ref:
        binder_proc_unlock(proc);
        return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc: proc containing the ref
 * @desc: the handle associated with the ref
 * @strong: true=strong reference, false=weak reference
 * @rdata: the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
                uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
        return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}


/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc: proc containing the ref
 * @node: target node
 * @strong: true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata: the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
                        struct binder_node *node,
                        bool strong,
                        struct list_head *target_list,
                        struct binder_ref_data *rdata)
{
        struct binder_ref *ref;
        struct binder_ref *new_ref = NULL;
        int ret = 0;

        binder_proc_lock(proc);
        ref = binder_get_ref_for_node_olocked(proc, node, NULL);
        if (!ref) {
                binder_proc_unlock(proc);
                new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
                if (!new_ref)
                        return -ENOMEM;
                binder_proc_lock(proc);
                ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
        }
        ret = binder_inc_ref_olocked(ref, strong, target_list);
        *rdata = ref->data;
        binder_proc_unlock(proc);
        if (new_ref && ref != new_ref)
                /*
                 * Another thread created the ref first so
                 * free the one we allocated
                 */
                kfree(new_ref);
        return ret;
}
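
/*
 * Note (illustrative): the alloc-then-retry sequence above is the usual
 * way to combine a GFP_KERNEL allocation with a spinlock-protected
 * lookup: drop the lock, kzalloc(), retake the lock and look up again,
 * since another thread may have inserted a ref meanwhile; the loser of
 * the race simply kfree()s its unused allocation.
 */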

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
                                           struct binder_transaction *t)
{
        BUG_ON(!target_thread);
        assert_spin_locked(&target_thread->proc->inner_lock);
        BUG_ON(target_thread->transaction_stack != t);
        BUG_ON(target_thread->transaction_stack->from != target_thread);
        target_thread->transaction_stack =
                target_thread->transaction_stack->from_parent;
        t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread: thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
        /*
         * atomic is used to protect the counter value while
         * it cannot reach zero or thread->is_dead is false
         */
        binder_inner_proc_lock(thread->proc);
        atomic_dec(&thread->tmp_ref);
        if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
                binder_inner_proc_unlock(thread->proc);
                binder_free_thread(thread);
                return;
        }
        binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc: proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
        binder_inner_proc_lock(proc);
        proc->tmp_ref--;
        if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
                        !proc->tmp_ref) {
                binder_inner_proc_unlock(proc);
                binder_free_proc(proc);
                return;
        }
        binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t: binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
                struct binder_transaction *t)
{
        struct binder_thread *from;

        spin_lock(&t->lock);
        from = t->from;
        if (from)
                atomic_inc(&from->tmp_ref);
        spin_unlock(&t->lock);
        return from;
}
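
/*
 * Illustrative sketch (not part of the driver): how a hypothetical
 * caller pairs binder_get_txn_from() with binder_thread_dec_tmpref():
 */
static void __maybe_unused binder_txn_from_sketch(struct binder_transaction *t)
{
        struct binder_thread *from = binder_get_txn_from(t);

        if (!from)
                return;         /* sender thread already gone */
        /* ... from cannot be freed while we hold the tmp_ref ... */
        binder_thread_dec_tmpref(from);
}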

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t: binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
                struct binder_transaction *t)
{
        struct binder_thread *from;

        from = binder_get_txn_from(t);
        if (!from)
                return NULL;
        binder_inner_proc_lock(from->proc);
        if (t->from) {
                BUG_ON(from != t->from);
                return from;
        }
        binder_inner_proc_unlock(from->proc);
        binder_thread_dec_tmpref(from);
        return NULL;
}

static void binder_free_transaction(struct binder_transaction *t)
{
        if (t->buffer)
                t->buffer->transaction = NULL;
        kfree(t);
        binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
                                     uint32_t error_code)
{
        struct binder_thread *target_thread;
        struct binder_transaction *next;

        BUG_ON(t->flags & TF_ONE_WAY);
        while (1) {
                target_thread = binder_get_txn_from_and_acq_inner(t);
                if (target_thread) {
                        binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
                                     "send failed reply for transaction %d to %d:%d\n",
                                     t->debug_id,
                                     target_thread->proc->pid,
                                     target_thread->pid);

                        binder_pop_transaction_ilocked(target_thread, t);
                        if (target_thread->reply_error.cmd == BR_OK) {
                                target_thread->reply_error.cmd = error_code;
                                binder_enqueue_thread_work_ilocked(
                                        target_thread,
                                        &target_thread->reply_error.work);
                                wake_up_interruptible(&target_thread->wait);
                        } else {
                                /*
                                 * Cannot get here for normal operation, but
                                 * we can if multiple synchronous transactions
                                 * are sent without blocking for responses.
                                 * Just ignore the 2nd error in this case.
                                 */
                                pr_warn("Unexpected reply error: %u\n",
                                        target_thread->reply_error.cmd);
                        }
                        binder_inner_proc_unlock(target_thread->proc);
                        binder_thread_dec_tmpref(target_thread);
                        binder_free_transaction(t);
                        return;
                }
                next = t->from_parent;

                binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
                             "send failed reply for transaction %d, target dead\n",
                             t->debug_id);

                binder_free_transaction(t);
                if (next == NULL) {
                        binder_debug(BINDER_DEBUG_DEAD_BINDER,
                                     "reply failed, no target thread at root\n");
                        return;
                }
                t = next;
                binder_debug(BINDER_DEBUG_DEAD_BINDER,
                             "reply failed, no target thread -- retry %d\n",
                             t->debug_id);
        }
}
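
/*
 * Note (illustrative): when the immediate sender thread is already gone,
 * the loop above walks t->from_parent up the transaction stack so the
 * error is delivered to the nearest ancestor still waiting, freeing each
 * dead transaction along the way.
 */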

/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t: transaction that needs to be cleaned up
 * @reason: reason the transaction wasn't delivered
 * @error_code: error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
                                       const char *reason,
                                       uint32_t error_code)
{
        if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
                binder_send_failed_reply(t, error_code);
        } else {
                binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
                        "undelivered transaction %d, %s\n",
                        t->debug_id, reason);
                binder_free_transaction(t);
        }
}

/**
 * binder_validate_object() - checks for a valid metadata object in a buffer.
 * @buffer: binder_buffer that we're parsing.
 * @offset: offset in the buffer at which to validate an object.
 *
 * Return: If there's a valid metadata object at @offset in @buffer, the
 *         size of that object. Otherwise, it returns zero.
 */
static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
{
        /* Check if we can read a header first */
        struct binder_object_header *hdr;
        size_t object_size = 0;

        if (buffer->data_size < sizeof(*hdr) ||
            offset > buffer->data_size - sizeof(*hdr) ||
            !IS_ALIGNED(offset, sizeof(u32)))
                return 0;

        /* Ok, now see if we can read a complete object. */
        hdr = (struct binder_object_header *)(buffer->data + offset);
        switch (hdr->type) {
        case BINDER_TYPE_BINDER:
        case BINDER_TYPE_WEAK_BINDER:
        case BINDER_TYPE_HANDLE:
        case BINDER_TYPE_WEAK_HANDLE:
                object_size = sizeof(struct flat_binder_object);
                break;
        case BINDER_TYPE_FD:
                object_size = sizeof(struct binder_fd_object);
                break;
        case BINDER_TYPE_PTR:
                object_size = sizeof(struct binder_buffer_object);
                break;
        case BINDER_TYPE_FDA:
                object_size = sizeof(struct binder_fd_array_object);
                break;
        default:
                return 0;
        }
        if (offset <= buffer->data_size - object_size &&
            buffer->data_size >= object_size)
                return object_size;
        else
                return 0;
}
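
/*
 * Worked example (illustrative): for a buffer whose offsets array is
 * {0, 24}, the parse loops below read a binder_object_header at
 * data + 0 and data + 24; binder_validate_object() then returns e.g.
 * sizeof(struct flat_binder_object) for a BINDER_TYPE_BINDER header,
 * or 0 if the offset is unaligned or the object would overrun
 * data_size.
 */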

/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b: binder_buffer containing the object
 * @index: index in offset array at which the binder_buffer_object is
 *         located
 * @start: points to the start of the offset array
 * @num_valid: the number of valid offsets in the offset array
 *
 * Return: If @index is within the valid range of the offset array
 *         described by @start and @num_valid, and if there's a valid
 *         binder_buffer_object at the offset found in index @index
 *         of the offset array, that object is returned. Otherwise,
 *         %NULL is returned.
 *         Note that the offset found in index @index itself is not
 *         verified; this function assumes that @num_valid elements
 *         from @start were previously verified to have valid offsets.
 */
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
                                                        binder_size_t index,
                                                        binder_size_t *start,
                                                        binder_size_t num_valid)
{
        struct binder_buffer_object *buffer_obj;
        binder_size_t *offp;

        if (index >= num_valid)
                return NULL;

        offp = start + index;
        buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
        if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
                return NULL;

        return buffer_obj;
}

/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @b: transaction buffer
 * @objects_start: start of objects buffer
 * @buffer: binder_buffer_object in which to fix up
 * @fixup_offset: start offset in @buffer to fix up
 * @last_obj: last binder_buffer_object that we fixed up in
 * @last_min_offset: minimum fixup offset in @last_obj
 *
 * Return: %true if a fixup in buffer @buffer at offset @fixup_offset is
 * allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_buffer *b,
                                  binder_size_t *objects_start,
                                  struct binder_buffer_object *buffer,
                                  binder_size_t fixup_offset,
                                  struct binder_buffer_object *last_obj,
                                  binder_size_t last_min_offset)
{
        if (!last_obj) {
                /* Nothing to fix up in */
                return false;
        }

        while (last_obj != buffer) {
                /*
                 * Safe to retrieve the parent of last_obj, since it
                 * was already previously verified by the driver.
                 */
                if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
                        return false;
                last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
                last_obj = (struct binder_buffer_object *)
                        (b->data + *(objects_start + last_obj->parent));
        }
        return (fixup_offset >= last_min_offset);
}

static void binder_transaction_buffer_release(struct binder_proc *proc,
                                              struct binder_buffer *buffer,
                                              binder_size_t *failed_at)
{
        binder_size_t *offp, *off_start, *off_end;
        int debug_id = buffer->debug_id;

        binder_debug(BINDER_DEBUG_TRANSACTION,
                     "%d buffer release %d, size %zd-%zd, failed at %p\n",
                     proc->pid, buffer->debug_id,
                     buffer->data_size, buffer->offsets_size, failed_at);

        if (buffer->target_node)
                binder_dec_node(buffer->target_node, 1, 0);

        off_start = (binder_size_t *)(buffer->data +
                                      ALIGN(buffer->data_size, sizeof(void *)));
        if (failed_at)
                off_end = failed_at;
        else
                off_end = (void *)off_start + buffer->offsets_size;
        for (offp = off_start; offp < off_end; offp++) {
                struct binder_object_header *hdr;
                size_t object_size = binder_validate_object(buffer, *offp);

                if (object_size == 0) {
                        pr_err("transaction release %d bad object at offset %lld, size %zd\n",
                               debug_id, (u64)*offp, buffer->data_size);
                        continue;
                }
                hdr = (struct binder_object_header *)(buffer->data + *offp);
                switch (hdr->type) {
                case BINDER_TYPE_BINDER:
                case BINDER_TYPE_WEAK_BINDER: {
                        struct flat_binder_object *fp;
                        struct binder_node *node;

                        fp = to_flat_binder_object(hdr);
                        node = binder_get_node(proc, fp->binder);
                        if (node == NULL) {
                                pr_err("transaction release %d bad node %016llx\n",
                                       debug_id, (u64)fp->binder);
                                break;
                        }
                        binder_debug(BINDER_DEBUG_TRANSACTION,
                                     "        node %d u%016llx\n",
                                     node->debug_id, (u64)node->ptr);
                        binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
                                        0);
                        binder_put_node(node);
                } break;
                case BINDER_TYPE_HANDLE:
                case BINDER_TYPE_WEAK_HANDLE: {
                        struct flat_binder_object *fp;
                        struct binder_ref_data rdata;
                        int ret;

                        fp = to_flat_binder_object(hdr);
                        ret = binder_dec_ref_for_handle(proc, fp->handle,
                                hdr->type == BINDER_TYPE_HANDLE, &rdata);

                        if (ret) {
                                pr_err("transaction release %d bad handle %d, ret = %d\n",
                                       debug_id, fp->handle, ret);
                                break;
                        }
                        binder_debug(BINDER_DEBUG_TRANSACTION,
                                     "        ref %d desc %d\n",
                                     rdata.debug_id, rdata.desc);
                } break;

                case BINDER_TYPE_FD: {
                        struct binder_fd_object *fp = to_binder_fd_object(hdr);

                        binder_debug(BINDER_DEBUG_TRANSACTION,
                                     "        fd %d\n", fp->fd);
                        if (failed_at)
                                task_close_fd(proc, fp->fd);
                } break;
                case BINDER_TYPE_PTR:
                        /*
                         * Nothing to do here, this will get cleaned up when the
                         * transaction buffer gets freed
                         */
                        break;
                case BINDER_TYPE_FDA: {
                        struct binder_fd_array_object *fda;
                        struct binder_buffer_object *parent;
                        uintptr_t parent_buffer;
                        u32 *fd_array;
                        size_t fd_index;
                        binder_size_t fd_buf_size;

                        fda = to_binder_fd_array_object(hdr);
                        parent = binder_validate_ptr(buffer, fda->parent,
                                                     off_start,
                                                     offp - off_start);
                        if (!parent) {
                                pr_err("transaction release %d bad parent offset",
                                       debug_id);
                                continue;
                        }
                        /*
                         * Since the parent was already fixed up, convert it
                         * back to kernel address space to access it
                         */
                        parent_buffer = parent->buffer -
                                binder_alloc_get_user_buffer_offset(
                                                &proc->alloc);

                        fd_buf_size = sizeof(u32) * fda->num_fds;
                        if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
                                pr_err("transaction release %d invalid number of fds (%lld)\n",
                                       debug_id, (u64)fda->num_fds);
                                continue;
                        }
                        if (fd_buf_size > parent->length ||
                            fda->parent_offset > parent->length - fd_buf_size) {
                                /* No space for all file descriptors here. */
                                pr_err("transaction release %d not enough space for %lld fds in buffer\n",
                                       debug_id, (u64)fda->num_fds);
                                continue;
                        }
                        fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
                        for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
                                task_close_fd(proc, fd_array[fd_index]);
                } break;
                default:
                        pr_err("transaction release %d bad object type %x\n",
                               debug_id, hdr->type);
                        break;
                }
        }
}

static int binder_translate_binder(struct flat_binder_object *fp,
                                   struct binder_transaction *t,
                                   struct binder_thread *thread)
{
        struct binder_node *node;
        struct binder_proc *proc = thread->proc;
        struct binder_proc *target_proc = t->to_proc;
        struct binder_ref_data rdata;
        int ret = 0;

        node = binder_get_node(proc, fp->binder);
        if (!node) {
                node = binder_new_node(proc, fp);
                if (!node)
                        return -ENOMEM;
        }
        if (fp->cookie != node->cookie) {
                binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
                                  proc->pid, thread->pid, (u64)fp->binder,
                                  node->debug_id, (u64)fp->cookie,
                                  (u64)node->cookie);
                ret = -EINVAL;
                goto done;
        }
        if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
                ret = -EPERM;
                goto done;
        }

        ret = binder_inc_ref_for_node(target_proc, node,
                        fp->hdr.type == BINDER_TYPE_BINDER,
                        &thread->todo, &rdata);
        if (ret)
                goto done;

        if (fp->hdr.type == BINDER_TYPE_BINDER)
                fp->hdr.type = BINDER_TYPE_HANDLE;
        else
                fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
        fp->binder = 0;
        fp->handle = rdata.desc;
        fp->cookie = 0;

        trace_binder_transaction_node_to_ref(t, node, &rdata);
        binder_debug(BINDER_DEBUG_TRANSACTION,
                     "        node %d u%016llx -> ref %d desc %d\n",
                     node->debug_id, (u64)node->ptr,
                     rdata.debug_id, rdata.desc);
done:
        binder_put_node(node);
        return ret;
}
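
/*
 * Worked example (illustrative): process A sends one of its own nodes
 * (BINDER_TYPE_BINDER with fp->binder holding A's user-space pointer)
 * to process B. After the translation above, B receives
 * BINDER_TYPE_HANDLE with fp->handle set to a descriptor in B's ref
 * table; fp->binder and fp->cookie are zeroed so A's addresses never
 * leak into B.
 */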

static int binder_translate_handle(struct flat_binder_object *fp,
                                   struct binder_transaction *t,
                                   struct binder_thread *thread)
{
        struct binder_proc *proc = thread->proc;
        struct binder_proc *target_proc = t->to_proc;
        struct binder_node *node;
        struct binder_ref_data src_rdata;
        int ret = 0;

        node = binder_get_node_from_ref(proc, fp->handle,
                        fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
        if (!node) {
                binder_user_error("%d:%d got transaction with invalid handle, %d\n",
                                  proc->pid, thread->pid, fp->handle);
                return -EINVAL;
        }
        if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
                ret = -EPERM;
                goto done;
        }

        binder_node_lock(node);
        if (node->proc == target_proc) {
                if (fp->hdr.type == BINDER_TYPE_HANDLE)
                        fp->hdr.type = BINDER_TYPE_BINDER;
                else
                        fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
                fp->binder = node->ptr;
                fp->cookie = node->cookie;
                if (node->proc)
                        binder_inner_proc_lock(node->proc);
                binder_inc_node_nilocked(node,
                                         fp->hdr.type == BINDER_TYPE_BINDER,
                                         0, NULL);
                if (node->proc)
                        binder_inner_proc_unlock(node->proc);
                trace_binder_transaction_ref_to_node(t, node, &src_rdata);
                binder_debug(BINDER_DEBUG_TRANSACTION,
                             "        ref %d desc %d -> node %d u%016llx\n",
                             src_rdata.debug_id, src_rdata.desc, node->debug_id,
                             (u64)node->ptr);
                binder_node_unlock(node);
        } else {
                struct binder_ref_data dest_rdata;

                binder_node_unlock(node);
                ret = binder_inc_ref_for_node(target_proc, node,
                                fp->hdr.type == BINDER_TYPE_HANDLE,
                                NULL, &dest_rdata);
                if (ret)
                        goto done;

                fp->binder = 0;
                fp->handle = dest_rdata.desc;
                fp->cookie = 0;
                trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
                                                    &dest_rdata);
                binder_debug(BINDER_DEBUG_TRANSACTION,
                             "        ref %d desc %d -> ref %d desc %d (node %d)\n",
                             src_rdata.debug_id, src_rdata.desc,
                             dest_rdata.debug_id, dest_rdata.desc,
                             node->debug_id);
        }
done:
        binder_put_node(node);
        return ret;
}
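
/*
 * Note (illustrative): this is the inverse of binder_translate_binder():
 * if the handle refers back to a node owned by the target process, it
 * collapses into BINDER_TYPE_BINDER with the owner's original
 * binder/cookie values; otherwise a ref is minted in the target and
 * only a descriptor travels.
 */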

static int binder_translate_fd(int fd,
                               struct binder_transaction *t,
                               struct binder_thread *thread,
                               struct binder_transaction *in_reply_to)
{
        struct binder_proc *proc = thread->proc;
        struct binder_proc *target_proc = t->to_proc;
        int target_fd;
        struct file *file;
        int ret;
        bool target_allows_fd;

        if (in_reply_to)
                target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
        else
                target_allows_fd = t->buffer->target_node->accept_fds;
        if (!target_allows_fd) {
                binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
                                  proc->pid, thread->pid,
                                  in_reply_to ? "reply" : "transaction",
                                  fd);
                ret = -EPERM;
                goto err_fd_not_accepted;
        }

        file = fget(fd);
        if (!file) {
                binder_user_error("%d:%d got transaction with invalid fd, %d\n",
                                  proc->pid, thread->pid, fd);
                ret = -EBADF;
                goto err_fget;
        }
        ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
        if (ret < 0) {
                ret = -EPERM;
                goto err_security;
        }

        target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
        if (target_fd < 0) {
                ret = -ENOMEM;
                goto err_get_unused_fd;
        }
        task_fd_install(target_proc, target_fd, file);
        trace_binder_transaction_fd(t, fd, target_fd);
        binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
                     fd, target_fd);

        return target_fd;

err_get_unused_fd:
err_security:
        fput(file);
err_fget:
err_fd_not_accepted:
        return ret;
}

static int binder_translate_fd_array(struct binder_fd_array_object *fda,
                                     struct binder_buffer_object *parent,
                                     struct binder_transaction *t,
                                     struct binder_thread *thread,
                                     struct binder_transaction *in_reply_to)
{
        binder_size_t fdi, fd_buf_size, num_installed_fds;
        int target_fd;
        uintptr_t parent_buffer;
        u32 *fd_array;
        struct binder_proc *proc = thread->proc;
        struct binder_proc *target_proc = t->to_proc;

        fd_buf_size = sizeof(u32) * fda->num_fds;
        if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
                binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
                                  proc->pid, thread->pid, (u64)fda->num_fds);
                return -EINVAL;
        }
        if (fd_buf_size > parent->length ||
            fda->parent_offset > parent->length - fd_buf_size) {
                /* No space for all file descriptors here. */
                binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
                                  proc->pid, thread->pid, (u64)fda->num_fds);
                return -EINVAL;
        }
        /*
         * Since the parent was already fixed up, convert it
         * back to the kernel address space to access it
         */
        parent_buffer = parent->buffer -
                binder_alloc_get_user_buffer_offset(&target_proc->alloc);
        fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
        if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
                binder_user_error("%d:%d parent offset not aligned correctly.\n",
                                  proc->pid, thread->pid);
                return -EINVAL;
        }
        for (fdi = 0; fdi < fda->num_fds; fdi++) {
                target_fd = binder_translate_fd(fd_array[fdi], t, thread,
                                                in_reply_to);
                if (target_fd < 0)
                        goto err_translate_fd_failed;
                fd_array[fdi] = target_fd;
        }
        return 0;

err_translate_fd_failed:
        /*
         * Failed to allocate fd or security error, free fds
         * installed so far.
         */
        num_installed_fds = fdi;
        for (fdi = 0; fdi < num_installed_fds; fdi++)
                task_close_fd(target_proc, fd_array[fdi]);
        return target_fd;
}
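
/*
 * Note (illustrative): on partial failure the unwind path above closes
 * only the fds already installed in the target (indexes 0..fdi-1),
 * giving BINDER_TYPE_FDA translation an all-or-nothing contract.
 */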

static int binder_fixup_parent(struct binder_transaction *t,
                               struct binder_thread *thread,
                               struct binder_buffer_object *bp,
                               binder_size_t *off_start,
                               binder_size_t num_valid,
                               struct binder_buffer_object *last_fixup_obj,
                               binder_size_t last_fixup_min_off)
{
        struct binder_buffer_object *parent;
        u8 *parent_buffer;
        struct binder_buffer *b = t->buffer;
        struct binder_proc *proc = thread->proc;
        struct binder_proc *target_proc = t->to_proc;

        if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
                return 0;

        parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
        if (!parent) {
                binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
                                  proc->pid, thread->pid);
                return -EINVAL;
        }

        if (!binder_validate_fixup(b, off_start,
                                   parent, bp->parent_offset,
                                   last_fixup_obj,
                                   last_fixup_min_off)) {
                binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
                                  proc->pid, thread->pid);
                return -EINVAL;
        }

        if (parent->length < sizeof(binder_uintptr_t) ||
            bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
                /* No space for a pointer here! */
                binder_user_error("%d:%d got transaction with invalid parent offset\n",
                                  proc->pid, thread->pid);
                return -EINVAL;
        }
        parent_buffer = (u8 *)((uintptr_t)parent->buffer -
                        binder_alloc_get_user_buffer_offset(
                                &target_proc->alloc));
        *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;

        return 0;
}
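
/*
 * Worked example (illustrative): if object B carries
 * BINDER_BUFFER_FLAG_HAS_PARENT with parent = A and parent_offset = 16,
 * the code above writes B's address in the target's mapping
 * (bp->buffer) into A's payload at byte offset 16, so the receiving
 * process sees a valid pointer to B inside A.
 */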

/**
 * binder_proc_transaction() - sends a transaction to a process and wakes it up
 * @t: transaction to send
 * @proc: process to send the transaction to
 * @thread: thread in @proc to send the transaction to (may be NULL)
 *
 * This function queues a transaction to the specified process. It will try
 * to find a thread in the target process to handle the transaction and
 * wake it up. If no thread is found, the work is queued to the proc
 * waitqueue.
 *
 * If the @thread parameter is not NULL, the transaction is always queued
 * to the waitlist of that specific thread.
 *
 * Return: true if the transaction was successfully queued
 *         false if the target process or thread is dead
 */
static bool binder_proc_transaction(struct binder_transaction *t,
                                    struct binder_proc *proc,
                                    struct binder_thread *thread)
{
        struct binder_node *node = t->buffer->target_node;
        struct binder_priority node_prio;
        bool oneway = !!(t->flags & TF_ONE_WAY);
        bool pending_async = false;

        BUG_ON(!node);
        binder_node_lock(node);
        node_prio.prio = node->min_priority;
        node_prio.sched_policy = node->sched_policy;

        if (oneway) {
                BUG_ON(thread);
                if (node->has_async_transaction) {
                        pending_async = true;
                } else {
                        node->has_async_transaction = true;
                }
        }

        binder_inner_proc_lock(proc);

        if (proc->is_dead || (thread && thread->is_dead)) {
                binder_inner_proc_unlock(proc);
                binder_node_unlock(node);
                return false;
        }

        if (!thread && !pending_async)
                thread = binder_select_thread_ilocked(proc);

        if (thread) {
                binder_transaction_priority(thread->task, t, node_prio,
                                            node->inherit_rt);
                binder_enqueue_thread_work_ilocked(thread, &t->work);
        } else if (!pending_async) {
                binder_enqueue_work_ilocked(&t->work, &proc->todo);
        } else {
                binder_enqueue_work_ilocked(&t->work, &node->async_todo);
        }

        if (!pending_async)
                binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

        binder_inner_proc_unlock(proc);
        binder_node_unlock(node);

        return true;
}
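
/*
 * Summary (illustrative) of the queueing decision above:
 *
 *   sync txn, explicit thread  -> thread->todo, wake that thread
 *   sync txn, no thread given  -> pick a waiting thread or proc->todo
 *   oneway,   node idle        -> proc->todo (or a selected thread)
 *   oneway,   async pending    -> node->async_todo; no wakeup until the
 *                                 previous async buffer is freed
 */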

/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node: struct binder_node for which to get refs
 * @procp: returns @node->proc if valid
 * @error: set to BR_DEAD_REPLY if @node->proc is NULL
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
 * target proc has died, @error is set to BR_DEAD_REPLY.
 */
static struct binder_node *binder_get_node_refs_for_txn(
                struct binder_node *node,
                struct binder_proc **procp,
                uint32_t *error)
{
        struct binder_node *target_node = NULL;

        binder_node_inner_lock(node);
        if (node->proc) {
                target_node = node;
                binder_inc_node_nilocked(node, 1, 0, NULL);
                binder_inc_node_tmpref_ilocked(node);
                node->proc->tmp_ref++;
                *procp = node->proc;
        } else
                *error = BR_DEAD_REPLY;
        binder_node_inner_unlock(node);

        return target_node;
}

static void binder_transaction(struct binder_proc *proc,
                               struct binder_thread *thread,
                               struct binder_transaction_data *tr, int reply,
                               binder_size_t extra_buffers_size)
{
        int ret;
        struct binder_transaction *t;
        struct binder_work *tcomplete;
        binder_size_t *offp, *off_end, *off_start;
        binder_size_t off_min;
        u8 *sg_bufp, *sg_buf_end;
        struct binder_proc *target_proc = NULL;
        struct binder_thread *target_thread = NULL;
        struct binder_node *target_node = NULL;
        struct binder_transaction *in_reply_to = NULL;
        struct binder_transaction_log_entry *e;
        uint32_t return_error = 0;
        uint32_t return_error_param = 0;
        uint32_t return_error_line = 0;
        struct binder_buffer_object *last_fixup_obj = NULL;
        binder_size_t last_fixup_min_off = 0;
        struct binder_context *context = proc->context;
        int t_debug_id = atomic_inc_return(&binder_last_id);
        char *secctx = NULL;
        u32 secctx_sz = 0;

        e = binder_transaction_log_add(&binder_transaction_log);
        e->debug_id = t_debug_id;
        e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
        e->from_proc = proc->pid;
        e->from_thread = thread->pid;
        e->target_handle = tr->target.handle;
        e->data_size = tr->data_size;
        e->offsets_size = tr->offsets_size;
        e->context_name = proc->context->name;

        if (reply) {
                binder_inner_proc_lock(proc);
                in_reply_to = thread->transaction_stack;
                if (in_reply_to == NULL) {
                        binder_inner_proc_unlock(proc);
                        binder_user_error("%d:%d got reply transaction with no transaction stack\n",
                                          proc->pid, thread->pid);
                        return_error = BR_FAILED_REPLY;
                        return_error_param = -EPROTO;
                        return_error_line = __LINE__;
                        goto err_empty_call_stack;
                }
                if (in_reply_to->to_thread != thread) {
                        spin_lock(&in_reply_to->lock);
                        binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
                                proc->pid, thread->pid, in_reply_to->debug_id,
                                in_reply_to->to_proc ?
                                in_reply_to->to_proc->pid : 0,
                                in_reply_to->to_thread ?
                                in_reply_to->to_thread->pid : 0);
                        spin_unlock(&in_reply_to->lock);
                        binder_inner_proc_unlock(proc);
                        return_error = BR_FAILED_REPLY;
                        return_error_param = -EPROTO;
                        return_error_line = __LINE__;
                        in_reply_to = NULL;
                        goto err_bad_call_stack;
                }
                thread->transaction_stack = in_reply_to->to_parent;
                binder_inner_proc_unlock(proc);
                target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
                if (target_thread == NULL) {
                        return_error = BR_DEAD_REPLY;
                        return_error_line = __LINE__;
                        goto err_dead_binder;
                }
                if (target_thread->transaction_stack != in_reply_to) {
                        binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
                                proc->pid, thread->pid,
                                target_thread->transaction_stack ?
                                target_thread->transaction_stack->debug_id : 0,
                                in_reply_to->debug_id);
                        binder_inner_proc_unlock(target_thread->proc);
                        return_error = BR_FAILED_REPLY;
                        return_error_param = -EPROTO;
                        return_error_line = __LINE__;
                        in_reply_to = NULL;
                        target_thread = NULL;
                        goto err_dead_binder;
                }
                target_proc = target_thread->proc;
                target_proc->tmp_ref++;
                binder_inner_proc_unlock(target_thread->proc);
        } else {
                if (tr->target.handle) {
                        struct binder_ref *ref;

                        /*
                         * There must already be a strong ref
                         * on this node. If so, do a strong
                         * increment on the node to ensure it
                         * stays alive until the transaction is
                         * done.
                         */
                        binder_proc_lock(proc);
                        ref = binder_get_ref_olocked(proc, tr->target.handle,
                                                     true);
                        if (ref) {
                                target_node = binder_get_node_refs_for_txn(
                                                ref->node, &target_proc,
                                                &return_error);
                        } else {
                                binder_user_error("%d:%d got transaction to invalid handle\n",
                                                  proc->pid, thread->pid);
                                return_error = BR_FAILED_REPLY;
                        }
                        binder_proc_unlock(proc);
                } else {
                        mutex_lock(&context->context_mgr_node_lock);
                        target_node = context->binder_context_mgr_node;
                        if (target_node)
                                target_node = binder_get_node_refs_for_txn(
                                                target_node, &target_proc,
                                                &return_error);
                        else
                                return_error = BR_DEAD_REPLY;
                        mutex_unlock(&context->context_mgr_node_lock);
                        if (target_node && target_proc == proc) {
                                binder_user_error("%d:%d got transaction to context manager from process owning it\n",
                                                  proc->pid, thread->pid);
                                return_error = BR_FAILED_REPLY;
                                return_error_param = -EINVAL;
                                return_error_line = __LINE__;
                                goto err_invalid_target_handle;
                        }
                }
                if (!target_node) {
                        /*
                         * return_error is set above
                         */
                        return_error_param = -EINVAL;
                        return_error_line = __LINE__;
                        goto err_dead_binder;
                }
                e->to_node = target_node->debug_id;
#ifdef CONFIG_SAMSUNG_FREECESS
                if (target_proc
                        && (target_proc->tsk->cred->euid.val > 10000)
                        && (proc->pid != target_proc->pid)) {
                        binder_report(proc->tsk, target_proc->tsk, tr->flags & TF_ONE_WAY);
                }

#endif
                if (security_binder_transaction(proc->tsk,
                                                target_proc->tsk) < 0) {
                        return_error = BR_FAILED_REPLY;
                        return_error_param = -EPERM;
                        return_error_line = __LINE__;
                        goto err_invalid_target_handle;
                }
                binder_inner_proc_lock(proc);
                if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
                        struct binder_transaction *tmp;

                        tmp = thread->transaction_stack;
                        if (tmp->to_thread != thread) {
                                spin_lock(&tmp->lock);
                                binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
                                        proc->pid, thread->pid, tmp->debug_id,
                                        tmp->to_proc ? tmp->to_proc->pid : 0,
                                        tmp->to_thread ?
                                        tmp->to_thread->pid : 0);
                                spin_unlock(&tmp->lock);
                                binder_inner_proc_unlock(proc);
                                return_error = BR_FAILED_REPLY;
                                return_error_param = -EPROTO;
                                return_error_line = __LINE__;
                                goto err_bad_call_stack;
                        }
                        while (tmp) {
                                struct binder_thread *from;

                                spin_lock(&tmp->lock);
                                from = tmp->from;
                                if (from && from->proc == target_proc) {
                                        atomic_inc(&from->tmp_ref);
                                        target_thread = from;
                                        spin_unlock(&tmp->lock);
                                        break;
                                }
                                spin_unlock(&tmp->lock);
                                tmp = tmp->from_parent;
                        }
                }
                binder_inner_proc_unlock(proc);
        }
        if (target_thread)
                e->to_thread = target_thread->pid;
        e->to_proc = target_proc->pid;

        /* TODO: reuse incoming transaction for reply */
        t = kzalloc(sizeof(*t), GFP_KERNEL);
        if (t == NULL) {
                return_error = BR_FAILED_REPLY;
                return_error_param = -ENOMEM;
                return_error_line = __LINE__;
                goto err_alloc_t_failed;
        }
        binder_stats_created(BINDER_STAT_TRANSACTION);
        spin_lock_init(&t->lock);

        tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
        if (tcomplete == NULL) {
                return_error = BR_FAILED_REPLY;
                return_error_param = -ENOMEM;
                return_error_line = __LINE__;
                goto err_alloc_tcomplete_failed;
        }
        binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);

        t->debug_id = t_debug_id;

        if (reply)
                binder_debug(BINDER_DEBUG_TRANSACTION,
                             "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
                             proc->pid, thread->pid, t->debug_id,
                             target_proc->pid, target_thread->pid,
                             (u64)tr->data.ptr.buffer,
                             (u64)tr->data.ptr.offsets,
                             (u64)tr->data_size, (u64)tr->offsets_size,
                             (u64)extra_buffers_size);
        else
                binder_debug(BINDER_DEBUG_TRANSACTION,
                             "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
                             proc->pid, thread->pid, t->debug_id,
                             target_proc->pid, target_node->debug_id,
                             (u64)tr->data.ptr.buffer,
                             (u64)tr->data.ptr.offsets,
                             (u64)tr->data_size, (u64)tr->offsets_size,
                             (u64)extra_buffers_size);

        if (!reply && !(tr->flags & TF_ONE_WAY))
                t->from = thread;
        else
                t->from = NULL;
        t->sender_euid = task_euid(proc->tsk);
        t->to_proc = target_proc;
        t->to_thread = target_thread;
        t->code = tr->code;
        t->flags = tr->flags;
        if (!(t->flags & TF_ONE_WAY) &&
            binder_supported_policy(current->policy)) {
                /* Inherit supported policies for synchronous transactions */
                t->priority.sched_policy = current->policy;
                t->priority.prio = current->normal_prio;
        } else {
                /* Otherwise, fall back to the default priority */
                t->priority = target_proc->default_priority;
        }
        if (target_node && target_node->txn_security_ctx) {
                u32 secid;

                security_task_getsecid(proc->tsk, &secid);
                ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
                if (ret) {
                        return_error = BR_FAILED_REPLY;
                        return_error_param = ret;
                        return_error_line = __LINE__;
                        goto err_get_secctx_failed;
                }
                extra_buffers_size += ALIGN(secctx_sz, sizeof(u64));
        }


        trace_binder_transaction(reply, t, target_node);

        t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
                tr->offsets_size, extra_buffers_size,
                !reply && (t->flags & TF_ONE_WAY));
        if (IS_ERR(t->buffer)) {
                /*
                 * -ESRCH indicates VMA cleared. The target is dying.
                 */
                return_error_param = PTR_ERR(t->buffer);
                return_error = return_error_param == -ESRCH ?
                        BR_DEAD_REPLY : BR_FAILED_REPLY;
                return_error_line = __LINE__;
                t->buffer = NULL;
                goto err_binder_alloc_buf_failed;
        }
        if (secctx) {
                size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
                                    ALIGN(tr->offsets_size, sizeof(void *)) +
                                    ALIGN(extra_buffers_size, sizeof(void *)) -
                                    ALIGN(secctx_sz, sizeof(u64));
                char *kptr = t->buffer->data + buf_offset;

                t->security_ctx = (uintptr_t)kptr +
                    binder_alloc_get_user_buffer_offset(&target_proc->alloc);
                memcpy(kptr, secctx, secctx_sz);
                security_release_secctx(secctx, secctx_sz);
                secctx = NULL;
        }

        t->buffer->debug_id = t->debug_id;
        t->buffer->transaction = t;
        t->buffer->target_node = target_node;
        trace_binder_transaction_alloc_buf(t->buffer);
        off_start = (binder_size_t *)(t->buffer->data +
                                      ALIGN(tr->data_size, sizeof(void *)));
        offp = off_start;

        if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
                           tr->data.ptr.buffer, tr->data_size)) {
                binder_user_error("%d:%d got transaction with invalid data ptr\n",
                                proc->pid, thread->pid);
                return_error = BR_FAILED_REPLY;
                return_error_param = -EFAULT;
                return_error_line = __LINE__;
                goto err_copy_data_failed;
        }
        if (copy_from_user(offp, (const void __user *)(uintptr_t)
                           tr->data.ptr.offsets, tr->offsets_size)) {
                binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
                                proc->pid, thread->pid);
                return_error = BR_FAILED_REPLY;
                return_error_param = -EFAULT;
                return_error_line = __LINE__;
                goto err_copy_data_failed;
        }
        if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
                binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
                                proc->pid, thread->pid, (u64)tr->offsets_size);
                return_error = BR_FAILED_REPLY;
                return_error_param = -EINVAL;
                return_error_line = __LINE__;
                goto err_bad_offset;
        }
        if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
                binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
                                  proc->pid, thread->pid,
                                  (u64)extra_buffers_size);
                return_error = BR_FAILED_REPLY;
                return_error_param = -EINVAL;
                return_error_line = __LINE__;
                goto err_bad_offset;
        }
        off_end = (void *)off_start + tr->offsets_size;
        sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
        sg_buf_end = sg_bufp + extra_buffers_size;
        off_min = 0;
        for (; offp < off_end; offp++) {
                struct binder_object_header *hdr;
                size_t object_size = binder_validate_object(t->buffer, *offp);

                if (object_size == 0 || *offp < off_min) {
                        binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
                                          proc->pid, thread->pid, (u64)*offp,
                                          (u64)off_min,
                                          (u64)t->buffer->data_size);
                        return_error = BR_FAILED_REPLY;
                        return_error_param = -EINVAL;
                        return_error_line = __LINE__;
                        goto err_bad_offset;
                }

                hdr = (struct binder_object_header *)(t->buffer->data + *offp);
                off_min = *offp + object_size;
                switch (hdr->type) {
                case BINDER_TYPE_BINDER:
                case BINDER_TYPE_WEAK_BINDER: {
                        struct flat_binder_object *fp;

                        fp = to_flat_binder_object(hdr);
                        ret = binder_translate_binder(fp, t, thread);
                        if (ret < 0) {
                                return_error = BR_FAILED_REPLY;
                                return_error_param = ret;
                                return_error_line = __LINE__;
                                goto err_translate_failed;
                        }
                } break;
                case BINDER_TYPE_HANDLE:
                case BINDER_TYPE_WEAK_HANDLE: {
                        struct flat_binder_object *fp;

                        fp = to_flat_binder_object(hdr);
                        ret = binder_translate_handle(fp, t, thread);
                        if (ret < 0) {
                                return_error = BR_FAILED_REPLY;
                                return_error_param = ret;
                                return_error_line = __LINE__;
                                goto err_translate_failed;
                        }
                } break;

                case BINDER_TYPE_FD: {
                        struct binder_fd_object *fp = to_binder_fd_object(hdr);
                        int target_fd = binder_translate_fd(fp->fd, t, thread,
                                                            in_reply_to);

                        if (target_fd < 0) {
                                return_error = BR_FAILED_REPLY;
                                return_error_param = target_fd;
                                return_error_line = __LINE__;
                                goto err_translate_failed;
                        }
                        fp->pad_binder = 0;
                        fp->fd = target_fd;
                } break;
                case BINDER_TYPE_FDA: {
                        struct binder_fd_array_object *fda =
                                to_binder_fd_array_object(hdr);
                        struct binder_buffer_object *parent =
                                binder_validate_ptr(t->buffer, fda->parent,
                                                    off_start,
                                                    offp - off_start);
                        if (!parent) {
                                binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
                                                  proc->pid, thread->pid);
                                return_error = BR_FAILED_REPLY;
                                return_error_param = -EINVAL;
                                return_error_line = __LINE__;
                                goto err_bad_parent;
                        }
                        if (!binder_validate_fixup(t->buffer, off_start,
                                                   parent, fda->parent_offset,
                                                   last_fixup_obj,
                                                   last_fixup_min_off)) {
                                binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
                                                  proc->pid, thread->pid);
                                return_error = BR_FAILED_REPLY;
                                return_error_param = -EINVAL;
                                return_error_line = __LINE__;
                                goto err_bad_parent;
                        }
                        ret = binder_translate_fd_array(fda, parent, t, thread,
                                                        in_reply_to);
                        if (ret < 0) {
                                return_error = BR_FAILED_REPLY;
                                return_error_param = ret;
                                return_error_line = __LINE__;
                                goto err_translate_failed;
                        }
                        last_fixup_obj = parent;
                        last_fixup_min_off =
                                fda->parent_offset + sizeof(u32) * fda->num_fds;
                } break;
                case BINDER_TYPE_PTR: {
                        struct binder_buffer_object *bp =
                                to_binder_buffer_object(hdr);
                        size_t buf_left = sg_buf_end - sg_bufp;

                        if (bp->length > buf_left) {
                                binder_user_error("%d:%d got transaction with too large buffer\n",
                                                  proc->pid, thread->pid);
                                return_error = BR_FAILED_REPLY;
                                return_error_param = -EINVAL;
                                return_error_line = __LINE__;
                                goto err_bad_offset;
                        }
                        if (copy_from_user(sg_bufp,
                                           (const void __user *)(uintptr_t)
                                           bp->buffer, bp->length)) {
                                binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
                                                  proc->pid, thread->pid);
0a0fdc1f 3349 return_error_param = -EFAULT;
dd9bc4f9 3350 return_error = BR_FAILED_REPLY;
0a0fdc1f 3351 return_error_line = __LINE__;
dd9bc4f9
MC
3352 goto err_copy_data_failed;
3353 }
3354 /* Fixup buffer pointer to target proc address space */
3355 bp->buffer = (uintptr_t)sg_bufp +
467545d8
TK
3356 binder_alloc_get_user_buffer_offset(
3357 &target_proc->alloc);
dd9bc4f9
MC
3358 sg_bufp += ALIGN(bp->length, sizeof(u64));
3359
3360 ret = binder_fixup_parent(t, thread, bp, off_start,
3361 offp - off_start,
3362 last_fixup_obj,
3363 last_fixup_min_off);
3364 if (ret < 0) {
3365 return_error = BR_FAILED_REPLY;
0a0fdc1f
TK
3366 return_error_param = ret;
3367 return_error_line = __LINE__;
dd9bc4f9
MC
3368 goto err_translate_failed;
3369 }
3370 last_fixup_obj = bp;
3371 last_fixup_min_off = 0;
355b0502 3372 } break;
355b0502 3373 default:
64dcfe6b 3374 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
ce0c6598 3375 proc->pid, thread->pid, hdr->type);
355b0502 3376 return_error = BR_FAILED_REPLY;
0a0fdc1f
TK
3377 return_error_param = -EINVAL;
3378 return_error_line = __LINE__;
355b0502
GKH
3379 goto err_bad_object_type;
3380 }
3381 }
6ea60271 3382 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
14c312e9 3383 t->work.type = BINDER_WORK_TRANSACTION;
6ea60271 3384
355b0502 3385 if (reply) {
95317055 3386 binder_enqueue_thread_work(thread, tcomplete);
89b657e0
MC
3387 binder_inner_proc_lock(target_proc);
3388 if (target_thread->is_dead) {
3389 binder_inner_proc_unlock(target_proc);
e482ec39 3390 goto err_dead_proc_or_thread;
89b657e0 3391 }
355b0502 3392 BUG_ON(t->buffer->async_transaction != 0);
89b657e0 3393 binder_pop_transaction_ilocked(target_thread, in_reply_to);
95317055 3394 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
89b657e0 3395 binder_inner_proc_unlock(target_proc);
5347bf52 3396 wake_up_interruptible_sync(&target_thread->wait);
76b376ea 3397 binder_restore_priority(current, in_reply_to->saved_priority);
16273538 3398 binder_free_transaction(in_reply_to);
355b0502
GKH
3399 } else if (!(t->flags & TF_ONE_WAY)) {
3400 BUG_ON(t->buffer->async_transaction != 0);
89b657e0 3401 binder_inner_proc_lock(proc);
6b6637fd
MC
3402 /*
3403 * Defer the TRANSACTION_COMPLETE, so we don't return to
3404 * userspace immediately; this allows the target process to
3405 * immediately start processing this transaction, reducing
3406 * latency. We will then return the TRANSACTION_COMPLETE when
3407 * the target replies (or there is an error).
3408 */
3409 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
355b0502
GKH
3410 t->need_reply = 1;
3411 t->from_parent = thread->transaction_stack;
3412 thread->transaction_stack = t;
89b657e0 3413 binder_inner_proc_unlock(proc);
5347bf52 3414 if (!binder_proc_transaction(t, target_proc, target_thread)) {
89b657e0
MC
3415 binder_inner_proc_lock(proc);
3416 binder_pop_transaction_ilocked(thread, t);
3417 binder_inner_proc_unlock(proc);
e482ec39
TK
3418 goto err_dead_proc_or_thread;
3419 }
355b0502
GKH
3420 } else {
3421 BUG_ON(target_node == NULL);
3422 BUG_ON(t->buffer->async_transaction != 1);
95317055 3423 binder_enqueue_thread_work(thread, tcomplete);
5347bf52 3424 if (!binder_proc_transaction(t, target_proc, NULL))
e482ec39 3425 goto err_dead_proc_or_thread;
0cebb407 3426 }
e482ec39
TK
3427 if (target_thread)
3428 binder_thread_dec_tmpref(target_thread);
3429 binder_proc_dec_tmpref(target_proc);
642da1da
TK
3430 if (target_node)
3431 binder_dec_node_tmpref(target_node);
0f32aeb3
TK
3432 /*
3433 * write barrier to synchronize with initialization
3434 * of log entry
3435 */
3436 smp_wmb();
3437 WRITE_ONCE(e->debug_id_done, t_debug_id);
355b0502
GKH
3438 return;
3439
e482ec39
TK
3440err_dead_proc_or_thread:
3441 return_error = BR_DEAD_REPLY;
3442 return_error_line = __LINE__;
ab10c4d8 3443 binder_dequeue_work(proc, tcomplete);
bfd49fea 3444err_translate_failed:
355b0502
GKH
3445err_bad_object_type:
3446err_bad_offset:
e124de38 3447err_bad_parent:
355b0502 3448err_copy_data_failed:
975a1ac9 3449 trace_binder_transaction_failed_buffer_release(t->buffer);
355b0502 3450 binder_transaction_buffer_release(target_proc, t->buffer, offp);
642da1da
TK
3451 if (target_node)
3452 binder_dec_node_tmpref(target_node);
f80cbc72 3453 target_node = NULL;
355b0502 3454 t->buffer->transaction = NULL;
467545d8 3455 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
355b0502 3456err_binder_alloc_buf_failed:
1cac41cb
MB
3457 if (secctx)
3458 security_release_secctx(secctx, secctx_sz);
3459err_get_secctx_failed:
355b0502
GKH
3460 kfree(tcomplete);
3461 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3462err_alloc_tcomplete_failed:
3463 kfree(t);
3464 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3465err_alloc_t_failed:
3466err_bad_call_stack:
3467err_empty_call_stack:
3468err_dead_binder:
3469err_invalid_target_handle:
e482ec39
TK
3470 if (target_thread)
3471 binder_thread_dec_tmpref(target_thread);
3472 if (target_proc)
3473 binder_proc_dec_tmpref(target_proc);
642da1da 3474 if (target_node) {
f80cbc72 3475 binder_dec_node(target_node, 1, 0);
642da1da
TK
3476 binder_dec_node_tmpref(target_node);
3477 }
f80cbc72 3478
355b0502 3479 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
0a0fdc1f
TK
3480 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3481 proc->pid, thread->pid, return_error, return_error_param,
3482 (u64)tr->data_size, (u64)tr->offsets_size,
3483 return_error_line);
355b0502
GKH
3484
3485 {
3486 struct binder_transaction_log_entry *fe;
10f62861 3487
0a0fdc1f
TK
3488 e->return_error = return_error;
3489 e->return_error_param = return_error_param;
3490 e->return_error_line = return_error_line;
ec49bb00 3491 fe = binder_transaction_log_add(&binder_transaction_log_failed);
355b0502 3492 *fe = *e;
0f32aeb3
TK
3493 /*
3494 * write barrier to synchronize with initialization
3495 * of log entry
3496 */
3497 smp_wmb();
3498 WRITE_ONCE(e->debug_id_done, t_debug_id);
3499 WRITE_ONCE(fe->debug_id_done, t_debug_id);
355b0502
GKH
3500 }
3501
3a822b33 3502 BUG_ON(thread->return_error.cmd != BR_OK);
355b0502 3503 if (in_reply_to) {
76b376ea 3504 binder_restore_priority(current, in_reply_to->saved_priority);
3a822b33 3505 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
95317055 3506 binder_enqueue_thread_work(thread, &thread->return_error.work);
355b0502 3507 binder_send_failed_reply(in_reply_to, return_error);
3a822b33
TK
3508 } else {
3509 thread->return_error.cmd = return_error;
95317055 3510 binder_enqueue_thread_work(thread, &thread->return_error.work);
3a822b33 3511 }
355b0502
GKH
3512}
3513
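/**
 * binder_thread_write() - consume BC_* commands from a userspace buffer
 * @proc:          binder_proc the calling thread belongs to
 * @thread:        calling binder_thread
 * @binder_buffer: userspace address of the command buffer
 * @size:          size of the command buffer
 * @consumed:      offset of the first unprocessed byte, updated after each
 *                 command so userspace can tell how far processing got
 *
 * Processes commands until the buffer is exhausted or a return error is
 * pending on the thread. Returns 0 on success, or a negative errno
 * (-EFAULT/-EINVAL) when a command is malformed.
 */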
static int binder_thread_write(struct binder_proc *proc,
			struct binder_thread *thread,
			binder_uintptr_t binder_buffer, size_t size,
			binder_size_t *consumed)
{
	uint32_t cmd;
	struct binder_context *context = proc->context;
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	while (ptr < end && thread->return_error.cmd == BR_OK) {
		int ret;

		if (get_user(cmd, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
		trace_binder_command(cmd);
		if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
			atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
			atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
		}
		switch (cmd) {
		case BC_INCREFS:
		case BC_ACQUIRE:
		case BC_RELEASE:
		case BC_DECREFS: {
			uint32_t target;
			const char *debug_string;
			bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
			bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
			struct binder_ref_data rdata;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(uint32_t);
			ret = -1;
			if (increment && !target) {
				struct binder_node *ctx_mgr_node;
				mutex_lock(&context->context_mgr_node_lock);
				ctx_mgr_node = context->binder_context_mgr_node;
				if (ctx_mgr_node)
					ret = binder_inc_ref_for_node(
							proc, ctx_mgr_node,
							strong, NULL, &rdata);
				mutex_unlock(&context->context_mgr_node_lock);
			}
			if (ret)
				ret = binder_update_ref_for_handle(
						proc, target, increment, strong,
						&rdata);
			if (!ret && rdata.desc != target) {
				binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
					proc->pid, thread->pid,
					target, rdata.desc);
			}
			switch (cmd) {
			case BC_INCREFS:
				debug_string = "IncRefs";
				break;
			case BC_ACQUIRE:
				debug_string = "Acquire";
				break;
			case BC_RELEASE:
				debug_string = "Release";
				break;
			case BC_DECREFS:
			default:
				debug_string = "DecRefs";
				break;
			}
			if (ret) {
				binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
					proc->pid, thread->pid, debug_string,
					strong, target, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s ref %d desc %d s %d w %d\n",
				     proc->pid, thread->pid, debug_string,
				     rdata.debug_id, rdata.desc, rdata.strong,
				     rdata.weak);
			break;
		}
		case BC_INCREFS_DONE:
		case BC_ACQUIRE_DONE: {
			binder_uintptr_t node_ptr;
			binder_uintptr_t cookie;
			struct binder_node *node;
			bool free_node;

			if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			node = binder_get_node(proc, node_ptr);
			if (node == NULL) {
				binder_user_error("%d:%d %s u%016llx no match\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" :
					"BC_ACQUIRE_DONE",
					(u64)node_ptr);
				break;
			}
			if (cookie != node->cookie) {
				binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
					proc->pid, thread->pid,
					cmd == BC_INCREFS_DONE ?
					"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
					(u64)node_ptr, node->debug_id,
					(u64)cookie, (u64)node->cookie);
				binder_put_node(node);
				break;
			}
			binder_node_inner_lock(node);
			if (cmd == BC_ACQUIRE_DONE) {
				if (node->pending_strong_ref == 0) {
					binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_strong_ref = 0;
			} else {
				if (node->pending_weak_ref == 0) {
					binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
						proc->pid, thread->pid,
						node->debug_id);
					binder_node_inner_unlock(node);
					binder_put_node(node);
					break;
				}
				node->pending_weak_ref = 0;
			}
			free_node = binder_dec_node_nilocked(node,
					cmd == BC_ACQUIRE_DONE, 0);
			WARN_ON(free_node);
			binder_debug(BINDER_DEBUG_USER_REFS,
				     "%d:%d %s node %d ls %d lw %d tr %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
				     node->debug_id, node->local_strong_refs,
				     node->local_weak_refs, node->tmp_refs);
			binder_node_inner_unlock(node);
			binder_put_node(node);
			break;
		}
		case BC_ATTEMPT_ACQUIRE:
			pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
			return -EINVAL;
		case BC_ACQUIRE_RESULT:
			pr_err("BC_ACQUIRE_RESULT not supported\n");
			return -EINVAL;

		case BC_FREE_BUFFER: {
			binder_uintptr_t data_ptr;
			struct binder_buffer *buffer;

			if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);

			buffer = binder_alloc_prepare_to_free(&proc->alloc,
							      data_ptr);
			if (IS_ERR_OR_NULL(buffer)) {
				if (PTR_ERR(buffer) == -EPERM) {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				} else {
					binder_user_error(
						"%d:%d BC_FREE_BUFFER u%016llx no match\n",
						proc->pid, thread->pid,
						(u64)data_ptr);
				}
				break;
			}
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
				     "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
				     proc->pid, thread->pid, (u64)data_ptr,
				     buffer->debug_id,
				     buffer->transaction ? "active" : "finished");

			if (buffer->transaction) {
				buffer->transaction->buffer = NULL;
				buffer->transaction = NULL;
			}
			if (buffer->async_transaction && buffer->target_node) {
				struct binder_node *buf_node;
				struct binder_work *w;

				buf_node = buffer->target_node;
				binder_node_inner_lock(buf_node);
				BUG_ON(!buf_node->has_async_transaction);
				BUG_ON(buf_node->proc != proc);
				w = binder_dequeue_work_head_ilocked(
						&buf_node->async_todo);
				if (!w) {
					buf_node->has_async_transaction = false;
				} else {
					binder_enqueue_work_ilocked(
							w, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
				binder_node_inner_unlock(buf_node);
			}
			trace_binder_transaction_buffer_release(buffer);
			binder_transaction_buffer_release(proc, buffer, NULL);
			binder_alloc_free_buf(&proc->alloc, buffer);
			break;
		}

		case BC_TRANSACTION_SG:
		case BC_REPLY_SG: {
			struct binder_transaction_data_sg tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr.transaction_data,
					   cmd == BC_REPLY_SG, tr.buffers_size);
			break;
		}
		case BC_TRANSACTION:
		case BC_REPLY: {
			struct binder_transaction_data tr;

			if (copy_from_user(&tr, ptr, sizeof(tr)))
				return -EFAULT;
			ptr += sizeof(tr);
			binder_transaction(proc, thread, &tr,
					   cmd == BC_REPLY, 0);
			break;
		}

		case BC_REGISTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_REGISTER_LOOPER\n",
				     proc->pid, thread->pid);
			binder_inner_proc_lock(proc);
			if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
					proc->pid, thread->pid);
			} else if (proc->requested_threads == 0) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
					proc->pid, thread->pid);
			} else {
				proc->requested_threads--;
				proc->requested_threads_started++;
			}
			thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
			binder_inner_proc_unlock(proc);
			break;
		case BC_ENTER_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_ENTER_LOOPER\n",
				     proc->pid, thread->pid);
			if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
				thread->looper |= BINDER_LOOPER_STATE_INVALID;
				binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
					proc->pid, thread->pid);
			}
			thread->looper |= BINDER_LOOPER_STATE_ENTERED;
			break;
		case BC_EXIT_LOOPER:
			binder_debug(BINDER_DEBUG_THREADS,
				     "%d:%d BC_EXIT_LOOPER\n",
				     proc->pid, thread->pid);
			thread->looper |= BINDER_LOOPER_STATE_EXITED;
			break;

		case BC_REQUEST_DEATH_NOTIFICATION:
		case BC_CLEAR_DEATH_NOTIFICATION: {
			uint32_t target;
			binder_uintptr_t cookie;
			struct binder_ref *ref;
			struct binder_ref_death *death = NULL;

			if (get_user(target, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				/*
				 * Allocate memory for death notification
				 * before taking lock
				 */
				death = kzalloc(sizeof(*death), GFP_KERNEL);
				if (death == NULL) {
					WARN_ON(thread->return_error.cmd !=
						BR_OK);
					thread->return_error.cmd = BR_ERROR;
					binder_enqueue_thread_work(
						thread,
						&thread->return_error.work);
					binder_debug(
						BINDER_DEBUG_FAILED_TRANSACTION,
						"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
						proc->pid, thread->pid);
					break;
				}
			}
			binder_proc_lock(proc);
			ref = binder_get_ref_olocked(proc, target, false);
			if (ref == NULL) {
				binder_user_error("%d:%d %s invalid ref %d\n",
					proc->pid, thread->pid,
					cmd == BC_REQUEST_DEATH_NOTIFICATION ?
					"BC_REQUEST_DEATH_NOTIFICATION" :
					"BC_CLEAR_DEATH_NOTIFICATION",
					target);
				binder_proc_unlock(proc);
				kfree(death);
				break;
			}

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
				     proc->pid, thread->pid,
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
				     "BC_REQUEST_DEATH_NOTIFICATION" :
				     "BC_CLEAR_DEATH_NOTIFICATION",
				     (u64)cookie, ref->data.debug_id,
				     ref->data.desc, ref->data.strong,
				     ref->data.weak, ref->node->debug_id);

			binder_node_lock(ref->node);
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
				if (ref->death) {
					binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					kfree(death);
					break;
				}
				binder_stats_created(BINDER_STAT_DEATH);
				INIT_LIST_HEAD(&death->work.entry);
				death->cookie = cookie;
				ref->death = death;
				if (ref->node->proc == NULL) {
					ref->death->work.type = BINDER_WORK_DEAD_BINDER;

					binder_inner_proc_lock(proc);
					binder_enqueue_work_ilocked(
						&ref->death->work, &proc->todo);
					binder_wakeup_proc_ilocked(proc);
					binder_inner_proc_unlock(proc);
				}
			} else {
				if (ref->death == NULL) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
						proc->pid, thread->pid);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				death = ref->death;
				if (death->cookie != cookie) {
					binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
						proc->pid, thread->pid,
						(u64)death->cookie,
						(u64)cookie);
					binder_node_unlock(ref->node);
					binder_proc_unlock(proc);
					break;
				}
				ref->death = NULL;
				binder_inner_proc_lock(proc);
				if (list_empty(&death->work.entry)) {
					death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
					if (thread->looper &
					    (BINDER_LOOPER_STATE_REGISTERED |
					     BINDER_LOOPER_STATE_ENTERED))
						binder_enqueue_thread_work_ilocked(
								thread,
								&death->work);
					else {
						binder_enqueue_work_ilocked(
								&death->work,
								&proc->todo);
						binder_wakeup_proc_ilocked(
								proc);
					}
				} else {
					BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
					death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
				}
				binder_inner_proc_unlock(proc);
			}
			binder_node_unlock(ref->node);
			binder_proc_unlock(proc);
		} break;
		case BC_DEAD_BINDER_DONE: {
			struct binder_work *w;
			binder_uintptr_t cookie;
			struct binder_ref_death *death = NULL;

			if (get_user(cookie, (binder_uintptr_t __user *)ptr))
				return -EFAULT;

			ptr += sizeof(cookie);
			binder_inner_proc_lock(proc);
			list_for_each_entry(w, &proc->delivered_death,
					    entry) {
				struct binder_ref_death *tmp_death =
					container_of(w,
						     struct binder_ref_death,
						     work);

				if (tmp_death->cookie == cookie) {
					death = tmp_death;
					break;
				}
			}
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
				     proc->pid, thread->pid, (u64)cookie,
				     death);
			if (death == NULL) {
				binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
					proc->pid, thread->pid, (u64)cookie);
				binder_inner_proc_unlock(proc);
				break;
			}
			binder_dequeue_work_ilocked(&death->work);
			if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
				death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
				if (thread->looper &
				    (BINDER_LOOPER_STATE_REGISTERED |
				     BINDER_LOOPER_STATE_ENTERED))
					binder_enqueue_thread_work_ilocked(
							thread, &death->work);
				else {
					binder_enqueue_work_ilocked(
							&death->work,
							&proc->todo);
					binder_wakeup_proc_ilocked(proc);
				}
			}
			binder_inner_proc_unlock(proc);
		} break;

		default:
			pr_err("%d:%d unknown command %d\n",
			       proc->pid, thread->pid, cmd);
			return -EINVAL;
		}
		*consumed = ptr - buffer;
	}
	return 0;
}

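/* Record a BR_* return code in the global, per-proc and per-thread stats. */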
static void binder_stat_br(struct binder_proc *proc,
			   struct binder_thread *thread, uint32_t cmd)
{
	trace_binder_return(cmd);
	if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
		atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
		atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
		atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
	}
}

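/*
 * Write a node refcount command (@cmd plus the node's ptr/cookie pair)
 * to the read buffer at *@ptrp, advancing *@ptrp past the written data.
 * Returns 0 on success, -EFAULT if the userspace buffer is not writable.
 */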
static int binder_put_node_cmd(struct binder_proc *proc,
			       struct binder_thread *thread,
			       void __user **ptrp,
			       binder_uintptr_t node_ptr,
			       binder_uintptr_t node_cookie,
			       int node_debug_id,
			       uint32_t cmd, const char *cmd_name)
{
	void __user *ptr = *ptrp;

	if (put_user(cmd, (uint32_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(uint32_t);

	if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
		return -EFAULT;
	ptr += sizeof(binder_uintptr_t);

	binder_stat_br(proc, thread, cmd);
	binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
		     proc->pid, thread->pid, cmd_name, node_debug_id,
		     (u64)node_ptr, (u64)node_cookie);

	*ptrp = ptr;
	return 0;
}

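/*
 * Sleep until this thread has work, or, when @do_proc_work is true, until
 * the process has work. While waiting for process work the thread sits on
 * proc->waiting_threads so binder_wakeup_proc_ilocked() can pick it.
 * Returns -ERESTARTSYS if the wait was interrupted by a signal.
 */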
static int binder_wait_for_work(struct binder_thread *thread,
				bool do_proc_work)
{
	DEFINE_WAIT(wait);
	struct binder_proc *proc = thread->proc;
	int ret = 0;

	freezer_do_not_count();
	binder_inner_proc_lock(proc);
	for (;;) {
		prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
		if (binder_has_work_ilocked(thread, do_proc_work))
			break;
		if (do_proc_work)
			list_add(&thread->waiting_thread_node,
				 &proc->waiting_threads);
		binder_inner_proc_unlock(proc);
		schedule();
		binder_inner_proc_lock(proc);
		list_del_init(&thread->waiting_thread_node);
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&thread->wait, &wait);
	binder_inner_proc_unlock(proc);
	freezer_count();

	return ret;
}

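/**
 * binder_thread_read() - fill a userspace buffer with BR_* work items
 * @proc:          binder_proc the calling thread belongs to
 * @thread:        calling binder_thread
 * @binder_buffer: userspace address of the read buffer
 * @size:          size of the read buffer
 * @consumed:      updated with the number of bytes written
 * @non_block:     if set, return -EAGAIN instead of sleeping
 *
 * Drains thread->todo (and proc->todo when the thread is available for
 * process work), translating each binder_work item into the BR_*
 * protocol. The leading BR_NOOP is overwritten with BR_SPAWN_LOOPER when
 * the driver wants userspace to start another looper thread.
 */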
static int binder_thread_read(struct binder_proc *proc,
			      struct binder_thread *thread,
			      binder_uintptr_t binder_buffer, size_t size,
			      binder_size_t *consumed, int non_block)
{
	void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
	void __user *ptr = buffer + *consumed;
	void __user *end = buffer + size;

	int ret = 0;
	int wait_for_proc_work;

	if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	binder_inner_proc_lock(proc);
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
	binder_inner_proc_unlock(proc);

	thread->looper |= BINDER_LOOPER_STATE_WAITING;

	trace_binder_wait_for_work(wait_for_proc_work,
				   !!thread->transaction_stack,
				   !binder_worklist_empty(proc, &thread->todo));
	if (wait_for_proc_work) {
		if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
					BINDER_LOOPER_STATE_ENTERED))) {
			binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
				proc->pid, thread->pid, thread->looper);
			wait_event_interruptible(binder_user_error_wait,
						 binder_stop_on_user_error < 2);
		}
		binder_restore_priority(current, proc->default_priority);
	}

	if (non_block) {
		if (!binder_has_work(thread, wait_for_proc_work))
			ret = -EAGAIN;
	} else {
		ret = binder_wait_for_work(thread, wait_for_proc_work);
	}

	thread->looper &= ~BINDER_LOOPER_STATE_WAITING;

	if (ret)
		return ret;

	while (1) {
		uint32_t cmd;
		struct binder_transaction_data_secctx tr;
		struct binder_transaction_data *trd = &tr.transaction_data;

		struct binder_work *w = NULL;
		struct list_head *list = NULL;
		struct binder_transaction *t = NULL;
		struct binder_thread *t_from;
		size_t trsize = sizeof(*trd);

		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&thread->todo))
			list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			   wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);
		if (binder_worklist_empty_ilocked(&thread->todo))
			thread->process_todo = false;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			binder_inner_proc_unlock(proc);
			t = container_of(w, struct binder_transaction, work);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			WARN_ON(e->cmd == BR_OK);
			binder_inner_proc_unlock(proc);
			if (put_user(e->cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			cmd = e->cmd;
			e->cmd = BR_OK;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_inner_proc_unlock(proc);
			cmd = BR_TRANSACTION_COMPLETE;
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);

			binder_stat_br(proc, thread, cmd);
			binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
				     "%d:%d BR_TRANSACTION_COMPLETE\n",
				     proc->pid, thread->pid);
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_NODE: {
			struct binder_node *node = container_of(w, struct binder_node, work);
			int strong, weak;
			binder_uintptr_t node_ptr = node->ptr;
			binder_uintptr_t node_cookie = node->cookie;
			int node_debug_id = node->debug_id;
			int has_weak_ref;
			int has_strong_ref;
			void __user *orig_ptr = ptr;

			BUG_ON(proc != node->proc);
			strong = node->internal_strong_refs ||
					node->local_strong_refs;
			weak = !hlist_empty(&node->refs) ||
					node->local_weak_refs ||
					node->tmp_refs || strong;
			has_strong_ref = node->has_strong_ref;
			has_weak_ref = node->has_weak_ref;

			if (weak && !has_weak_ref) {
				node->has_weak_ref = 1;
				node->pending_weak_ref = 1;
				node->local_weak_refs++;
			}
			if (strong && !has_strong_ref) {
				node->has_strong_ref = 1;
				node->pending_strong_ref = 1;
				node->local_strong_refs++;
			}
			if (!strong && has_strong_ref)
				node->has_strong_ref = 0;
			if (!weak && has_weak_ref)
				node->has_weak_ref = 0;
			if (!weak && !strong) {
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx deleted\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_inner_proc_unlock(proc);
				binder_node_lock(node);
				/*
				 * Acquire the node lock before freeing the
				 * node to serialize with other threads that
				 * may have been holding the node lock while
				 * decrementing this node (avoids race where
				 * this thread frees while the other thread
				 * is unlocking the node after the final
				 * decrement)
				 */
				binder_node_unlock(node);
				binder_free_node(node);
			} else
				binder_inner_proc_unlock(proc);

			if (weak && !has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_INCREFS, "BR_INCREFS");
			if (!ret && strong && !has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_ACQUIRE, "BR_ACQUIRE");
			if (!ret && !strong && has_strong_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_RELEASE, "BR_RELEASE");
			if (!ret && !weak && has_weak_ref)
				ret = binder_put_node_cmd(
						proc, thread, &ptr, node_ptr,
						node_cookie, node_debug_id,
						BR_DECREFS, "BR_DECREFS");
			if (orig_ptr == ptr)
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "%d:%d node %d u%016llx c%016llx state unchanged\n",
					     proc->pid, thread->pid,
					     node_debug_id,
					     (u64)node_ptr,
					     (u64)node_cookie);
			if (ret)
				return ret;
		} break;
		case BINDER_WORK_DEAD_BINDER:
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;
			uint32_t cmd;
			binder_uintptr_t cookie;

			death = container_of(w, struct binder_ref_death, work);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
				cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
			else
				cmd = BR_DEAD_BINDER;
			cookie = death->cookie;

			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
				     "%d:%d %s %016llx\n",
				     proc->pid, thread->pid,
				     cmd == BR_DEAD_BINDER ?
				     "BR_DEAD_BINDER" :
				     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
				     (u64)cookie);
			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
				binder_inner_proc_unlock(proc);
				kfree(death);
				binder_stats_deleted(BINDER_STAT_DEATH);
			} else {
				binder_enqueue_work_ilocked(
						w, &proc->delivered_death);
				binder_inner_proc_unlock(proc);
			}
			if (put_user(cmd, (uint32_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(uint32_t);
			if (put_user(cookie,
				     (binder_uintptr_t __user *)ptr))
				return -EFAULT;
			ptr += sizeof(binder_uintptr_t);
			binder_stat_br(proc, thread, cmd);
			if (cmd == BR_DEAD_BINDER)
				goto done; /* DEAD_BINDER notifications can cause transactions */
		} break;
		}

		if (!t)
			continue;

		BUG_ON(t->buffer == NULL);
		if (t->buffer->target_node) {
			struct binder_node *target_node = t->buffer->target_node;
			struct binder_priority node_prio;

			trd->target.ptr = target_node->ptr;
			trd->cookie = target_node->cookie;

			node_prio.sched_policy = target_node->sched_policy;
			node_prio.prio = target_node->min_priority;
			binder_transaction_priority(current, t, node_prio,
						    target_node->inherit_rt);
			cmd = BR_TRANSACTION;
		} else {
			trd->target.ptr = 0;
			trd->cookie = 0;
			cmd = BR_REPLY;
		}
		trd->code = t->code;
		trd->flags = t->flags;
		trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);

		t_from = binder_get_txn_from(t);
		if (t_from) {
			struct task_struct *sender = t_from->proc->tsk;

			trd->sender_pid = task_tgid_nr_ns(sender,
						task_active_pid_ns(current));
		} else {
			trd->sender_pid = 0;
		}

		trd->data_size = t->buffer->data_size;
		trd->offsets_size = t->buffer->offsets_size;
		trd->data.ptr.buffer = (binder_uintptr_t)
			((uintptr_t)t->buffer->data +
			binder_alloc_get_user_buffer_offset(&proc->alloc));
		trd->data.ptr.offsets = trd->data.ptr.buffer +
					ALIGN(t->buffer->data_size,
					      sizeof(void *));
		tr.secctx = t->security_ctx;
		if (t->security_ctx) {
			cmd = BR_TRANSACTION_SEC_CTX;
			trsize = sizeof(tr);
		}

		if (put_user(cmd, (uint32_t __user *)ptr)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "put_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += sizeof(uint32_t);
		if (copy_to_user(ptr, &tr, trsize)) {
			if (t_from)
				binder_thread_dec_tmpref(t_from);

			binder_cleanup_transaction(t, "copy_to_user failed",
						   BR_FAILED_REPLY);

			return -EFAULT;
		}
		ptr += trsize;

		trace_binder_transaction_received(t);
		binder_stat_br(proc, thread, cmd);
		binder_debug(BINDER_DEBUG_TRANSACTION,
			     "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
			     proc->pid, thread->pid,
			     (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
			     (cmd == BR_TRANSACTION_SEC_CTX) ?
				"BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
			     t->debug_id, t_from ? t_from->proc->pid : 0,
			     t_from ? t_from->pid : 0, cmd,
			     t->buffer->data_size, t->buffer->offsets_size,
			     (u64)trd->data.ptr.buffer,
			     (u64)trd->data.ptr.offsets);

		if (t_from)
			binder_thread_dec_tmpref(t_from);
		t->buffer->allow_user_free = 1;
		if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
			binder_inner_proc_lock(thread->proc);
			t->to_parent = thread->transaction_stack;
			t->to_thread = thread;
			thread->transaction_stack = t;
			binder_inner_proc_unlock(thread->proc);
		} else {
			binder_free_transaction(t);
		}
		break;
	}

done:

	*consumed = ptr - buffer;
	binder_inner_proc_lock(proc);
	if (proc->requested_threads == 0 &&
	    list_empty(&thread->proc->waiting_threads) &&
	    proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
	     BINDER_LOOPER_STATE_ENTERED))
	    /* the user-space code fails to spawn a new thread if we leave this out */) {
		proc->requested_threads++;
		binder_inner_proc_unlock(proc);
		binder_debug(BINDER_DEBUG_THREADS,
			     "%d:%d BR_SPAWN_LOOPER\n",
			     proc->pid, thread->pid);
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
			return -EFAULT;
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}

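/*
 * Drain @list and discard every undelivered work item, failing pending
 * transactions with BR_DEAD_REPLY. Used when a thread or process goes
 * away with work still queued.
 */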
static void binder_release_work(struct binder_proc *proc,
				struct list_head *list)
{
	struct binder_work *w;

	while (1) {
		w = binder_dequeue_work_head(proc, list);
		if (!w)
			return;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
			struct binder_transaction *t;

			t = container_of(w, struct binder_transaction, work);

			binder_cleanup_transaction(t, "process died.",
						   BR_DEAD_REPLY);
		} break;
		case BINDER_WORK_RETURN_ERROR: {
			struct binder_error *e = container_of(
					w, struct binder_error, work);

			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_ERROR: %u\n",
				e->cmd);
		} break;
		case BINDER_WORK_TRANSACTION_COMPLETE: {
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered TRANSACTION_COMPLETE\n");
			kfree(w);
			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
		} break;
		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
			struct binder_ref_death *death;

			death = container_of(w, struct binder_ref_death, work);
			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
				"undelivered death notification, %016llx\n",
				(u64)death->cookie);
			kfree(death);
			binder_stats_deleted(BINDER_STAT_DEATH);
		} break;
		default:
			pr_err("unexpected work type, %d, not freed\n",
			       w->type);
			break;
		}
	}

}

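/*
 * Find the binder_thread for current in proc->threads (an rbtree keyed
 * by pid). On a miss, if @new_thread is non-NULL it is initialized and
 * inserted; the caller compares the return value against @new_thread to
 * see whether its allocation was consumed. Requires the inner lock, as
 * the _ilocked suffix indicates.
 */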
static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	get_task_struct(current);
	thread->task = current;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}

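/*
 * Find or create the binder_thread for current. The allocation happens
 * outside the inner lock; if the retried lookup finds an existing thread
 * instead of consuming the allocation, the spare copy is freed.
 */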
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}

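/* Final teardown of a binder_proc, once the last temporary reference is gone. */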
static void binder_free_proc(struct binder_proc *proc)
{
	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}

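/*
 * Final teardown of a binder_thread; drops the proc reference taken for
 * it in binder_thread_release().
 */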
static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	put_task_struct(thread->task);
	kfree(thread);
}

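/*
 * Detach @thread from @proc and unwind its transaction stack: in-flight
 * transactions are unlinked from the dying thread, a reply owed by it is
 * failed with BR_DEAD_REPLY, and any pollers are woken with POLLFREE
 * before the last thread reference is dropped. Returns the number of
 * transactions that were still active.
 */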
static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			     proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
	}

	/*
	 * If this thread used poll, make sure we remove the waitqueue
	 * from any epoll data structures holding it with POLLFREE.
	 * waitqueue_active() is safe to use here because we're holding
	 * the inner lock.
	 */
	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
	    waitqueue_active(&thread->wait)) {
		wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
	}

	binder_inner_proc_unlock(thread->proc);

	/*
	 * This is needed to avoid races between wake_up_poll() above and
	 * ep_remove_waitqueue() called for other reasons (eg the epoll file
	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
	 * lock, so we can be sure it's done after calling synchronize_rcu().
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		synchronize_rcu();

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}

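/*
 * poll() support. The BINDER_LOOPER_STATE_POLL flag is recorded so that
 * teardown in binder_thread_release() can wake pollers safely; readiness
 * means the thread (or the process, if this thread may handle process
 * work) has something on its todo list.
 */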
static unsigned int binder_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	bool wait_for_proc_work;

	thread = binder_get_thread(proc);
	if (!thread)
		return POLLERR;

	binder_inner_proc_lock(thread->proc);
	thread->looper |= BINDER_LOOPER_STATE_POLL;
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);

	binder_inner_proc_unlock(thread->proc);

	poll_wait(filp, &thread->wait, wait);

	if (binder_has_work(thread, wait_for_proc_work))
		return POLLIN;

	return 0;
}

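/*
 * Handler for BINDER_WRITE_READ: copy in the binder_write_read block,
 * process the write buffer, then fill the read buffer. The consumed
 * counts are copied back to userspace even on failure so the client can
 * tell how far it got.
 */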
static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}

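/*
 * Handler for BINDER_SET_CONTEXT_MGR(_EXT): make the caller the context
 * manager, reachable through handle 0. Fails if a context manager is
 * already registered, if the security hook denies the caller, or if a
 * different euid claimed the role earlier. @fbo is non-NULL only for the
 * _EXT variant, which passes flat_binder_object flags through to the new
 * node.
 */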
static int binder_ioctl_set_ctx_mgr(struct file *filp,
				    struct flat_binder_object *fbo)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, fbo);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}

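/*
 * Handler for BINDER_GET_NODE_DEBUG_INFO: report the first node of this
 * proc whose ptr is strictly greater than info->ptr, so repeated calls
 * walk all nodes in ascending ptr order.
 */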
static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
				struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;

	memset(info, 0, sizeof(*info));

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
			info->ptr = node->ptr;
			info->cookie = node->cookie;
			info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}

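/*
 * Top-level ioctl dispatcher; the heavy lifting is in the helpers above.
 * Note that BINDER_THREAD_EXIT releases the calling thread, so the local
 * thread pointer is cleared to avoid touching it again on the error path.
 */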
355b0502
GKH
4807static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4808{
4809 int ret;
4810 struct binder_proc *proc = filp->private_data;
4811 struct binder_thread *thread;
4812 unsigned int size = _IOC_SIZE(cmd);
4813 void __user *ubuf = (void __user *)arg;
4814
78260ac6
TR
4815 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4816 proc->pid, current->pid, cmd, arg);*/
355b0502 4817
3de14ff3
SY
4818 binder_selftest_alloc(&proc->alloc);
4819
975a1ac9
AH
4820 trace_binder_ioctl(cmd, arg);
4821
355b0502
GKH
4822 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4823 if (ret)
975a1ac9 4824 goto err_unlocked;
355b0502 4825
355b0502
GKH
4826 thread = binder_get_thread(proc);
4827 if (thread == NULL) {
4828 ret = -ENOMEM;
4829 goto err;
4830 }
4831
4832 switch (cmd) {
78260ac6
TR
4833 case BINDER_WRITE_READ:
4834 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4835 if (ret)
355b0502 4836 goto err;
355b0502 4837 break;
814ce251
TK
4838 case BINDER_SET_MAX_THREADS: {
4839 int max_threads;
4840
4841 if (copy_from_user(&max_threads, ubuf,
4842 sizeof(max_threads))) {
355b0502
GKH
4843 ret = -EINVAL;
4844 goto err;
4845 }
814ce251
TK
4846 binder_inner_proc_lock(proc);
4847 proc->max_threads = max_threads;
4848 binder_inner_proc_unlock(proc);
355b0502 4849 break;
814ce251 4850 }
1cac41cb
MB
4851 case BINDER_SET_CONTEXT_MGR_EXT: {
4852 struct flat_binder_object fbo;
4853
4854 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
4855 ret = -EINVAL;
4856 goto err;
4857 }
4858 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
4859 if (ret)
4860 goto err;
4861 break;
4862 }
4863
355b0502 4864 case BINDER_SET_CONTEXT_MGR:
1cac41cb 4865 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
78260ac6 4866 if (ret)
355b0502 4867 goto err;
355b0502
GKH
4868 break;
4869 case BINDER_THREAD_EXIT:
56b468fc 4870 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
355b0502 4871 proc->pid, thread->pid);
e482ec39 4872 binder_thread_release(proc, thread);
355b0502
GKH
4873 thread = NULL;
4874 break;
36c89c0a
MM
4875 case BINDER_VERSION: {
4876 struct binder_version __user *ver = ubuf;
4877
355b0502
GKH
4878 if (size != sizeof(struct binder_version)) {
4879 ret = -EINVAL;
4880 goto err;
4881 }
36c89c0a
MM
4882 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4883 &ver->protocol_version)) {
355b0502
GKH
4884 ret = -EINVAL;
4885 goto err;
4886 }
4887 break;
36c89c0a 4888 }
89ce9d97
CC
4889 case BINDER_GET_NODE_DEBUG_INFO: {
4890 struct binder_node_debug_info info;
4891
4892 if (copy_from_user(&info, ubuf, sizeof(info))) {
4893 ret = -EFAULT;
4894 goto err;
4895 }
4896
4897 ret = binder_ioctl_get_node_debug_info(proc, &info);
4898 if (ret < 0)
4899 goto err;
4900
4901 if (copy_to_user(ubuf, &info, sizeof(info))) {
4902 ret = -EFAULT;
4903 goto err;
4904 }
4905 break;
4906 }
355b0502
GKH
4907 default:
4908 ret = -EINVAL;
4909 goto err;
4910 }
4911 ret = 0;
4912err:
4913 if (thread)
afda44d0 4914 thread->looper_need_return = false;
355b0502
GKH
4915 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4916 if (ret && ret != -ERESTARTSYS)
56b468fc 4917 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
975a1ac9
AH
4918err_unlocked:
4919 trace_binder_ioctl_done(ret);
355b0502
GKH
4920 return ret;
4921}
4922
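/*
 * Editorial sketch: the simplest binder_ioctl() commands as driven from
 * userspace. A client typically verifies the protocol version right
 * after open() and then caps its looper pool; a would-be context
 * manager instead starts with BINDER_SET_CONTEXT_MGR. "fd" is assumed
 * open on /dev/binder.
 *
 *	struct binder_version vers;
 *	int max_threads = 15;
 *
 *	if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		return -1;	// mismatched kernel/userspace headers
 *	ioctl(fd, BINDER_SET_MAX_THREADS, &max_threads);
 *	// servicemanager only: ioctl(fd, BINDER_SET_CONTEXT_MGR, 0);
 */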
4923static void binder_vma_open(struct vm_area_struct *vma)
4924{
4925 struct binder_proc *proc = vma->vm_private_data;
10f62861 4926
355b0502 4927 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
56b468fc 4928 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
355b0502
GKH
4929 proc->pid, vma->vm_start, vma->vm_end,
4930 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4931 (unsigned long)pgprot_val(vma->vm_page_prot));
355b0502
GKH
4932}
4933
4934static void binder_vma_close(struct vm_area_struct *vma)
4935{
4936 struct binder_proc *proc = vma->vm_private_data;
10f62861 4937
355b0502 4938 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
56b468fc 4939 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
355b0502
GKH
4940 proc->pid, vma->vm_start, vma->vm_end,
4941 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4942 (unsigned long)pgprot_val(vma->vm_page_prot));
467545d8 4943 binder_alloc_vma_close(&proc->alloc);
5a068558 4944 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
355b0502
GKH
4945}
4946
ddac7d5f
VM
4947static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4948{
4949 return VM_FAULT_SIGBUS;
4950}
4951
7cbea8dc 4952static const struct vm_operations_struct binder_vm_ops = {
355b0502
GKH
4953 .open = binder_vma_open,
4954 .close = binder_vma_close,
ddac7d5f 4955 .fault = binder_vm_fault,
355b0502
GKH
4956};
4957
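/*
 * Editorial note: the fault handler deliberately does no work. Pages
 * are only ever inserted into this mapping by the allocator when it
 * hands out a buffer, so a client touching any address the driver has
 * not populated gets SIGBUS instead of a silently faulted-in page.
 */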
467545d8
TK
4958static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
4959{
4960 int ret;
4961 struct binder_proc *proc = filp->private_data;
4962 const char *failure_string;
4963
4964 if (proc->tsk != current->group_leader)
4965 return -EINVAL;
4966
4967 if ((vma->vm_end - vma->vm_start) > SZ_4M)
4968 vma->vm_end = vma->vm_start + SZ_4M;
4969
4970 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
4971 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
4972 __func__, proc->pid, vma->vm_start, vma->vm_end,
4973 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
4974 (unsigned long)pgprot_val(vma->vm_page_prot));
4975
4976 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
4977 ret = -EPERM;
4978 failure_string = "bad vm_flags";
4979 goto err_bad_arg;
4980 }
5a068558
MB
4981 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
4982 vma->vm_flags &= ~VM_MAYWRITE;
4983
467545d8
TK
4984 vma->vm_ops = &binder_vm_ops;
4985 vma->vm_private_data = proc;
4986
4987 ret = binder_alloc_mmap_handler(&proc->alloc, vma);
5a068558
MB
4988 if (ret)
4989 return ret;
4990 mutex_lock(&proc->files_lock);
4991 proc->files = get_files_struct(current);
4992 mutex_unlock(&proc->files_lock);
4993 return 0;
467545d8 4994
355b0502 4995err_bad_arg:
5a068558 4996 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
355b0502
GKH
4997 proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
4998 return ret;
4999}
5000
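/*
 * Editorial sketch: the matching userspace side of binder_mmap(). The
 * driver clamps the mapping to 4MB and, via FORBIDDEN_MMAP_FLAGS and
 * the cleared VM_MAYWRITE above, rejects writable or executable
 * mappings, so clients map the buffer area read-only. The 1MB size
 * here is illustrative, not mandated.
 *
 *	size_t map_size = 1 * 1024 * 1024;	// anything <= 4MB
 *	void *base = mmap(NULL, map_size, PROT_READ,
 *			  MAP_PRIVATE | MAP_NORESERVE, fd, 0);
 *
 *	if (base == MAP_FAILED)
 *		return -1;
 */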
5001static int binder_open(struct inode *nodp, struct file *filp)
5002{
5003 struct binder_proc *proc;
04e3812e 5004 struct binder_device *binder_dev;
355b0502 5005
5a068558 5006 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
355b0502
GKH
5007 current->group_leader->pid, current->pid);
5008
5009 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5010 if (proc == NULL)
5011 return -ENOMEM;
b0f59d6d
TK
5012 spin_lock_init(&proc->inner_lock);
5013 spin_lock_init(&proc->outer_lock);
35979513
MC
5014 get_task_struct(current->group_leader);
5015 proc->tsk = current->group_leader;
5a068558 5016 mutex_init(&proc->files_lock);
355b0502 5017 INIT_LIST_HEAD(&proc->todo);
d30e6a87
MC
5018 if (binder_supported_policy(current->policy)) {
5019 proc->default_priority.sched_policy = current->policy;
5020 proc->default_priority.prio = current->normal_prio;
5021 } else {
5022 proc->default_priority.sched_policy = SCHED_NORMAL;
5023 proc->default_priority.prio = NICE_TO_PRIO(0);
5024 }
5025
04e3812e
MC
5026 binder_dev = container_of(filp->private_data, struct binder_device,
5027 miscdev);
5028 proc->context = &binder_dev->context;
467545d8 5029 binder_alloc_init(&proc->alloc);
975a1ac9 5030
355b0502 5031 binder_stats_created(BINDER_STAT_PROC);
355b0502
GKH
5032 proc->pid = current->group_leader->pid;
5033 INIT_LIST_HEAD(&proc->delivered_death);
c9cd6356 5034 INIT_LIST_HEAD(&proc->waiting_threads);
355b0502 5035 filp->private_data = proc;
975a1ac9 5036
3490fdcb
TK
5037 mutex_lock(&binder_procs_lock);
5038 hlist_add_head(&proc->proc_node, &binder_procs);
5039 mutex_unlock(&binder_procs_lock);
5040
16b66554 5041 if (binder_debugfs_dir_entry_proc) {
355b0502 5042 char strbuf[11];
10f62861 5043
355b0502 5044 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
8b980bee
MC
5045 /*
5046 * proc debug entries are shared between contexts, so
5047 * this will fail if the process tries to open the driver
5048 * again with a different context. The printing code will
5049 * anyway print all contexts that a given PID has, so this
5050 * is not a problem.
5051 */
5a068558 5052 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
8b980bee
MC
5053 binder_debugfs_dir_entry_proc,
5054 (void *)(unsigned long)proc->pid,
5055 &binder_proc_fops);
355b0502
GKH
5056 }
5057
5058 return 0;
5059}
5060
5061static int binder_flush(struct file *filp, fl_owner_t id)
5062{
5063 struct binder_proc *proc = filp->private_data;
5064
5065 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5066
5067 return 0;
5068}
5069
5070static void binder_deferred_flush(struct binder_proc *proc)
5071{
5072 struct rb_node *n;
5073 int wake_count = 0;
10f62861 5074
e4951233 5075 binder_inner_proc_lock(proc);
355b0502
GKH
5076 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5077 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
10f62861 5078
afda44d0 5079 thread->looper_need_return = true;
355b0502
GKH
5080 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5081 wake_up_interruptible(&thread->wait);
5082 wake_count++;
5083 }
5084 }
e4951233 5085 binder_inner_proc_unlock(proc);
355b0502
GKH
5086
5087 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5088 "binder_flush: %d woke %d threads\n", proc->pid,
5089 wake_count);
5090}
5091
5092static int binder_release(struct inode *nodp, struct file *filp)
5093{
5094 struct binder_proc *proc = filp->private_data;
10f62861 5095
16b66554 5096 debugfs_remove(proc->debugfs_entry);
355b0502
GKH
5097 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5098
5099 return 0;
5100}
5101
008fa749
ME
5102static int binder_node_release(struct binder_node *node, int refs)
5103{
5104 struct binder_ref *ref;
5105 int death = 0;
f73f378b 5106 struct binder_proc *proc = node->proc;
008fa749 5107
57628830 5108 binder_release_work(proc, &node->async_todo);
f73f378b 5109
14c312e9 5110 binder_node_lock(node);
f73f378b 5111 binder_inner_proc_lock(proc);
57628830 5112 binder_dequeue_work_ilocked(&node->work);
96dd75d9
TK
5113 /*
5114 * The caller must have taken a temporary ref on the node.
5115 */
5116 BUG_ON(!node->tmp_refs);
5117 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
f73f378b 5118 binder_inner_proc_unlock(proc);
14c312e9 5119 binder_node_unlock(node);
f73f378b 5120 binder_free_node(node);
008fa749
ME
5121
5122 return refs;
5123 }
5124
5125 node->proc = NULL;
5126 node->local_strong_refs = 0;
5127 node->local_weak_refs = 0;
f73f378b 5128 binder_inner_proc_unlock(proc);
3490fdcb
TK
5129
5130 spin_lock(&binder_dead_nodes_lock);
ec49bb00 5131 hlist_add_head(&node->dead_node, &binder_dead_nodes);
3490fdcb 5132 spin_unlock(&binder_dead_nodes_lock);
008fa749
ME
5133
5134 hlist_for_each_entry(ref, &node->refs, node_entry) {
5135 refs++;
6c8ad5b3
MC
5136 /*
5137 * Need the node lock to synchronize
5138 * with new notification requests and the
5139 * inner lock to synchronize with queued
5140 * death notifications.
5141 */
5142 binder_inner_proc_lock(ref->proc);
5143 if (!ref->death) {
5144 binder_inner_proc_unlock(ref->proc);
e194fd8a 5145 continue;
6c8ad5b3 5146 }
008fa749
ME
5147
5148 death++;
5149
6c8ad5b3
MC
5150 BUG_ON(!list_empty(&ref->death->work.entry));
5151 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5152 binder_enqueue_work_ilocked(&ref->death->work,
5153 &ref->proc->todo);
5347bf52 5154 binder_wakeup_proc_ilocked(ref->proc);
57628830 5155 binder_inner_proc_unlock(ref->proc);
008fa749
ME
5156 }
5157
008fa749
ME
5158 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5159 "node %d now dead, refs %d, death %d\n",
5160 node->debug_id, refs, death);
14c312e9 5161 binder_node_unlock(node);
96dd75d9 5162 binder_put_node(node);
008fa749
ME
5163
5164 return refs;
5165}
5166
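/*
 * Editorial sketch: the userspace request whose delivery the code above
 * completes. A client holding a remote reference asks for a death
 * notification through the write side of BINDER_WRITE_READ; once the
 * node's owner dies, binder_node_release() queues BR_DEAD_BINDER, which
 * comes back tagged with the registered cookie. "handle" and "cookie"
 * are assumed to come from an existing reference.
 *
 *	struct {
 *		uint32_t cmd;
 *		struct binder_handle_cookie hc;
 *	} __attribute__((packed)) req = {
 *		.cmd = BC_REQUEST_DEATH_NOTIFICATION,
 *		.hc = { .handle = handle, .cookie = cookie },
 *	};
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(req),
 *		.write_buffer = (binder_uintptr_t)(uintptr_t)&req,
 *	};
 *
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 */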
355b0502
GKH
5167static void binder_deferred_release(struct binder_proc *proc)
5168{
803df563 5169 struct binder_context *context = proc->context;
355b0502 5170 struct rb_node *n;
467545d8 5171 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
355b0502 5172
5a068558
MB
5173 BUG_ON(proc->files);
5174
3490fdcb 5175 mutex_lock(&binder_procs_lock);
355b0502 5176 hlist_del(&proc->proc_node);
3490fdcb 5177 mutex_unlock(&binder_procs_lock);
53413e7d 5178
3490fdcb 5179 mutex_lock(&context->context_mgr_node_lock);
803df563
MC
5180 if (context->binder_context_mgr_node &&
5181 context->binder_context_mgr_node->proc == proc) {
355b0502 5182 binder_debug(BINDER_DEBUG_DEAD_BINDER,
c07c933f
ME
5183 "%s: %d context_mgr_node gone\n",
5184 __func__, proc->pid);
803df563 5185 context->binder_context_mgr_node = NULL;
355b0502 5186 }
3490fdcb 5187 mutex_unlock(&context->context_mgr_node_lock);
e4951233 5188 binder_inner_proc_lock(proc);
e482ec39
TK
5189 /*
5190 * Make sure proc stays alive after we
5191 * remove all the threads
5192 */
5193 proc->tmp_ref++;
355b0502 5194
e482ec39 5195 proc->is_dead = true;
355b0502
GKH
5196 threads = 0;
5197 active_transactions = 0;
5198 while ((n = rb_first(&proc->threads))) {
53413e7d
ME
5199 struct binder_thread *thread;
5200
5201 thread = rb_entry(n, struct binder_thread, rb_node);
e4951233 5202 binder_inner_proc_unlock(proc);
355b0502 5203 threads++;
e482ec39 5204 active_transactions += binder_thread_release(proc, thread);
e4951233 5205 binder_inner_proc_lock(proc);
355b0502 5206 }
53413e7d 5207
355b0502
GKH
5208 nodes = 0;
5209 incoming_refs = 0;
5210 while ((n = rb_first(&proc->nodes))) {
53413e7d 5211 struct binder_node *node;
355b0502 5212
53413e7d 5213 node = rb_entry(n, struct binder_node, rb_node);
355b0502 5214 nodes++;
96dd75d9
TK
5215 /*
5216 * take a temporary ref on the node before
5217 * calling binder_node_release() which will either
5218 * kfree() the node or call binder_put_node()
5219 */
46655970 5220 binder_inc_node_tmpref_ilocked(node);
355b0502 5221 rb_erase(&node->rb_node, &proc->nodes);
46655970 5222 binder_inner_proc_unlock(proc);
ec49bb00 5223 incoming_refs = binder_node_release(node, incoming_refs);
46655970 5224 binder_inner_proc_lock(proc);
355b0502 5225 }
46655970 5226 binder_inner_proc_unlock(proc);
53413e7d 5227
355b0502 5228 outgoing_refs = 0;
6fcb2b9a 5229 binder_proc_lock(proc);
355b0502 5230 while ((n = rb_first(&proc->refs_by_desc))) {
53413e7d
ME
5231 struct binder_ref *ref;
5232
5233 ref = rb_entry(n, struct binder_ref, rb_node_desc);
355b0502 5234 outgoing_refs++;
6fcb2b9a
TK
5235 binder_cleanup_ref_olocked(ref);
5236 binder_proc_unlock(proc);
f7d87412 5237 binder_free_ref(ref);
6fcb2b9a 5238 binder_proc_lock(proc);
355b0502 5239 }
6fcb2b9a 5240 binder_proc_unlock(proc);
53413e7d 5241
57628830
TK
5242 binder_release_work(proc, &proc->todo);
5243 binder_release_work(proc, &proc->delivered_death);
355b0502 5244
355b0502 5245 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
467545d8 5246 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
c07c933f 5247 __func__, proc->pid, threads, nodes, incoming_refs,
467545d8 5248 outgoing_refs, active_transactions);
355b0502 5249
e482ec39 5250 binder_proc_dec_tmpref(proc);
355b0502
GKH
5251}
5252
5253static void binder_deferred_func(struct work_struct *work)
5254{
5255 struct binder_proc *proc;
5a068558
MB
5256 struct files_struct *files;
5257
355b0502 5258 int defer;
10f62861 5259
355b0502 5260 do {
ec49bb00
TK
5261 mutex_lock(&binder_deferred_lock);
5262 if (!hlist_empty(&binder_deferred_list)) {
5263 proc = hlist_entry(binder_deferred_list.first,
355b0502
GKH
5264 struct binder_proc, deferred_work_node);
5265 hlist_del_init(&proc->deferred_work_node);
5266 defer = proc->deferred_work;
5267 proc->deferred_work = 0;
5268 } else {
5269 proc = NULL;
5270 defer = 0;
5271 }
ec49bb00 5272 mutex_unlock(&binder_deferred_lock);
355b0502 5273
5a068558
MB
5274 files = NULL;
5275 if (defer & BINDER_DEFERRED_PUT_FILES) {
5276 mutex_lock(&proc->files_lock);
5277 files = proc->files;
5278 if (files)
5279 proc->files = NULL;
5280 mutex_unlock(&proc->files_lock);
5281 }
5282
355b0502
GKH
5283 if (defer & BINDER_DEFERRED_FLUSH)
5284 binder_deferred_flush(proc);
5285
5286 if (defer & BINDER_DEFERRED_RELEASE)
5287 binder_deferred_release(proc); /* frees proc */
5a068558
MB
5288
5289 if (files)
5290 put_files_struct(files);
355b0502
GKH
5291 } while (proc);
5292}
ec49bb00 5293static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
355b0502
GKH
5294
5295static void
5296binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5297{
ec49bb00 5298 mutex_lock(&binder_deferred_lock);
355b0502
GKH
5299 proc->deferred_work |= defer;
5300 if (hlist_unhashed(&proc->deferred_work_node)) {
5301 hlist_add_head(&proc->deferred_work_node,
ec49bb00 5302 &binder_deferred_list);
1cac41cb 5303 schedule_work(&binder_deferred_work);
355b0502 5304 }
ec49bb00 5305 mutex_unlock(&binder_deferred_lock);
355b0502
GKH
5306}
5307
da957e45
TK
5308static void print_binder_transaction_ilocked(struct seq_file *m,
5309 struct binder_proc *proc,
5310 const char *prefix,
5311 struct binder_transaction *t)
5249f488 5312{
da957e45
TK
5313 struct binder_proc *to_proc;
5314 struct binder_buffer *buffer = t->buffer;
5315
e482ec39 5316 spin_lock(&t->lock);
da957e45 5317 to_proc = t->to_proc;
5249f488 5318 seq_printf(m,
d30e6a87 5319 "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
5249f488
AH
5320 prefix, t->debug_id, t,
5321 t->from ? t->from->proc->pid : 0,
5322 t->from ? t->from->pid : 0,
da957e45 5323 to_proc ? to_proc->pid : 0,
5249f488 5324 t->to_thread ? t->to_thread->pid : 0,
d30e6a87
MC
5325 t->code, t->flags, t->priority.sched_policy,
5326 t->priority.prio, t->need_reply);
e482ec39
TK
5327 spin_unlock(&t->lock);
5328
da957e45
TK
5329 if (proc != to_proc) {
5330 /*
5331 * Can only safely deref buffer if we are holding the
5332 * correct proc inner lock for this node
5333 */
5334 seq_puts(m, "\n");
5335 return;
5336 }
5337
5338 if (buffer == NULL) {
5249f488
AH
5339 seq_puts(m, " buffer free\n");
5340 return;
355b0502 5341 }
da957e45
TK
5342 if (buffer->target_node)
5343 seq_printf(m, " node %d", buffer->target_node->debug_id);
5249f488 5344 seq_printf(m, " size %zd:%zd data %p\n",
da957e45
TK
5345 buffer->data_size, buffer->offsets_size,
5346 buffer->data);
355b0502
GKH
5347}
5348
da957e45
TK
5349static void print_binder_work_ilocked(struct seq_file *m,
5350 struct binder_proc *proc,
5351 const char *prefix,
5352 const char *transaction_prefix,
5353 struct binder_work *w)
355b0502
GKH
5354{
5355 struct binder_node *node;
5356 struct binder_transaction *t;
5357
5358 switch (w->type) {
5359 case BINDER_WORK_TRANSACTION:
5360 t = container_of(w, struct binder_transaction, work);
da957e45
TK
5361 print_binder_transaction_ilocked(
5362 m, proc, transaction_prefix, t);
355b0502 5363 break;
3a822b33
TK
5364 case BINDER_WORK_RETURN_ERROR: {
5365 struct binder_error *e = container_of(
5366 w, struct binder_error, work);
5367
5368 seq_printf(m, "%stransaction error: %u\n",
5369 prefix, e->cmd);
5370 } break;
355b0502 5371 case BINDER_WORK_TRANSACTION_COMPLETE:
5249f488 5372 seq_printf(m, "%stransaction complete\n", prefix);
355b0502
GKH
5373 break;
5374 case BINDER_WORK_NODE:
5375 node = container_of(w, struct binder_node, work);
da49889d
AH
5376 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5377 prefix, node->debug_id,
5378 (u64)node->ptr, (u64)node->cookie);
355b0502
GKH
5379 break;
5380 case BINDER_WORK_DEAD_BINDER:
5249f488 5381 seq_printf(m, "%shas dead binder\n", prefix);
355b0502
GKH
5382 break;
5383 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5249f488 5384 seq_printf(m, "%shas cleared dead binder\n", prefix);
355b0502
GKH
5385 break;
5386 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5249f488 5387 seq_printf(m, "%shas cleared death notification\n", prefix);
355b0502
GKH
5388 break;
5389 default:
5249f488 5390 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
355b0502
GKH
5391 break;
5392 }
355b0502
GKH
5393}
5394
57628830
TK
5395static void print_binder_thread_ilocked(struct seq_file *m,
5396 struct binder_thread *thread,
5397 int print_always)
355b0502
GKH
5398{
5399 struct binder_transaction *t;
5400 struct binder_work *w;
5249f488
AH
5401 size_t start_pos = m->count;
5402 size_t header_pos;
355b0502 5403
e482ec39 5404 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
afda44d0 5405 thread->pid, thread->looper,
e482ec39
TK
5406 thread->looper_need_return,
5407 atomic_read(&thread->tmp_ref));
5249f488 5408 header_pos = m->count;
355b0502
GKH
5409 t = thread->transaction_stack;
5410 while (t) {
355b0502 5411 if (t->from == thread) {
da957e45
TK
5412 print_binder_transaction_ilocked(m, thread->proc,
5413 " outgoing transaction", t);
355b0502
GKH
5414 t = t->from_parent;
5415 } else if (t->to_thread == thread) {
da957e45 5416 print_binder_transaction_ilocked(m, thread->proc,
5249f488 5417 " incoming transaction", t);
355b0502
GKH
5418 t = t->to_parent;
5419 } else {
da957e45
TK
5420 print_binder_transaction_ilocked(m, thread->proc,
5421 " bad transaction", t);
355b0502
GKH
5422 t = NULL;
5423 }
5424 }
5425 list_for_each_entry(w, &thread->todo, entry) {
da957e45 5426 print_binder_work_ilocked(m, thread->proc, " ",
57628830 5427 " pending transaction", w);
355b0502 5428 }
5249f488
AH
5429 if (!print_always && m->count == header_pos)
5430 m->count = start_pos;
355b0502
GKH
5431}
5432
46655970
TK
5433static void print_binder_node_nilocked(struct seq_file *m,
5434 struct binder_node *node)
355b0502
GKH
5435{
5436 struct binder_ref *ref;
355b0502
GKH
5437 struct binder_work *w;
5438 int count;
5439
5440 count = 0;
b67bfe0d 5441 hlist_for_each_entry(ref, &node->refs, node_entry)
355b0502
GKH
5442 count++;
5443
adb68543 5444 seq_printf(m, " node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
da49889d 5445 node->debug_id, (u64)node->ptr, (u64)node->cookie,
adb68543 5446 node->sched_policy, node->min_priority,
5249f488
AH
5447 node->has_strong_ref, node->has_weak_ref,
5448 node->local_strong_refs, node->local_weak_refs,
96dd75d9 5449 node->internal_strong_refs, count, node->tmp_refs);
355b0502 5450 if (count) {
5249f488 5451 seq_puts(m, " proc");
b67bfe0d 5452 hlist_for_each_entry(ref, &node->refs, node_entry)
5249f488 5453 seq_printf(m, " %d", ref->proc->pid);
355b0502 5454 }
5249f488 5455 seq_puts(m, "\n");
57628830 5456 if (node->proc) {
57628830 5457 list_for_each_entry(w, &node->async_todo, entry)
da957e45 5458 print_binder_work_ilocked(m, node->proc, " ",
57628830 5459 " pending async transaction", w);
57628830 5460 }
355b0502
GKH
5461}
5462
6fcb2b9a
TK
5463static void print_binder_ref_olocked(struct seq_file *m,
5464 struct binder_ref *ref)
355b0502 5465{
14c312e9 5466 binder_node_lock(ref->node);
f7d87412
TK
5467 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5468 ref->data.debug_id, ref->data.desc,
5469 ref->node->proc ? "" : "dead ",
5470 ref->node->debug_id, ref->data.strong,
5471 ref->data.weak, ref->death);
14c312e9 5472 binder_node_unlock(ref->node);
355b0502
GKH
5473}
5474
5249f488
AH
5475static void print_binder_proc(struct seq_file *m,
5476 struct binder_proc *proc, int print_all)
355b0502
GKH
5477{
5478 struct binder_work *w;
5479 struct rb_node *n;
5249f488
AH
5480 size_t start_pos = m->count;
5481 size_t header_pos;
46655970 5482 struct binder_node *last_node = NULL;
5249f488
AH
5483
5484 seq_printf(m, "proc %d\n", proc->pid);
8b980bee 5485 seq_printf(m, "context %s\n", proc->context->name);
5249f488
AH
5486 header_pos = m->count;
5487
57628830 5488 binder_inner_proc_lock(proc);
5249f488 5489 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
57628830 5490 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5249f488 5491 rb_node), print_all);
46655970 5492
5249f488 5493 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
355b0502
GKH
5494 struct binder_node *node = rb_entry(n, struct binder_node,
5495 rb_node);
46655970
TK
5496 /*
5497 * take a temporary reference on the node so it
5498 * survives and isn't removed from the tree
5499 * while we print it.
5500 */
5501 binder_inc_node_tmpref_ilocked(node);
5502 /* Need to drop inner lock to take node lock */
5503 binder_inner_proc_unlock(proc);
5504 if (last_node)
5505 binder_put_node(last_node);
5506 binder_node_inner_lock(node);
5507 print_binder_node_nilocked(m, node);
5508 binder_node_inner_unlock(node);
5509 last_node = node;
5510 binder_inner_proc_lock(proc);
355b0502 5511 }
46655970
TK
5512 binder_inner_proc_unlock(proc);
5513 if (last_node)
5514 binder_put_node(last_node);
5515
355b0502 5516 if (print_all) {
6fcb2b9a 5517 binder_proc_lock(proc);
355b0502 5518 for (n = rb_first(&proc->refs_by_desc);
5249f488 5519 n != NULL;
355b0502 5520 n = rb_next(n))
6fcb2b9a
TK
5521 print_binder_ref_olocked(m, rb_entry(n,
5522 struct binder_ref,
5523 rb_node_desc));
5524 binder_proc_unlock(proc);
355b0502 5525 }
467545d8 5526 binder_alloc_print_allocated(m, &proc->alloc);
57628830 5527 binder_inner_proc_lock(proc);
5249f488 5528 list_for_each_entry(w, &proc->todo, entry)
da957e45
TK
5529 print_binder_work_ilocked(m, proc, " ",
5530 " pending transaction", w);
355b0502 5531 list_for_each_entry(w, &proc->delivered_death, entry) {
5249f488 5532 seq_puts(m, " has delivered dead binder\n");
355b0502
GKH
5533 break;
5534 }
57628830 5535 binder_inner_proc_unlock(proc);
5249f488
AH
5536 if (!print_all && m->count == header_pos)
5537 m->count = start_pos;
355b0502
GKH
5538}
5539
1cac41cb
MB
5540#ifdef CONFIG_SAMSUNG_FREECESS
5541static void binder_in_transaction(struct binder_proc *proc)
5542{
5543 struct rb_node *n = NULL;
5544 struct binder_thread *thread = NULL;
5545 int uid = -1;
5546 struct task_struct *tsk = NULL;
5547 struct binder_transaction *t = NULL;
5548 bool empty = true;
5549 bool found = false;
5550
5551 //check each binder thread's todo list and transaction_stack
5552 binder_inner_proc_lock(proc);
5553 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5554 thread = rb_entry(n, struct binder_thread, rb_node);
5555 empty = binder_worklist_empty_ilocked(&thread->todo);
5556 tsk = thread->task;
5557
5558 if (tsk != NULL) {
5559 //thread still has pending binder work
5560 if (!empty) {
5561 //report uid to FW, only report one time
5562 uid = tsk->cred->euid.val;
5563 binder_inner_proc_unlock(proc);
5564 cfb_report(uid, "thread");
5565 return;
5566 }
5567
5568 //processing one binder call
5569 t = thread->transaction_stack;
5570 if (t) {
5571 spin_lock(&t->lock);
5572 if (t->to_thread == thread) {
5573 //an incoming transaction is in progress
5574 found = true;
5575 uid = tsk->cred->euid.val;
5576 }
5577 spin_unlock(&t->lock);
5578 if (found) {
5579 //report uid to FW, only report one time
5580 binder_inner_proc_unlock(proc);
5581 cfb_report(uid, "transaction_stack");
5582 return;
5583 }
5584 }
5585 }
5586 }
5587
5588 //check binder proc todo list
5589 empty = binder_worklist_empty_ilocked(&proc->todo);
5590 tsk = proc->tsk;
5591 if (tsk != NULL && !empty) {
5592 //report uid to FW
5593 uid = tsk->cred->euid.val;
5594 binder_inner_proc_unlock(proc);
5595 cfb_report(uid, "proc");
5596 }
5597 else
5598 binder_inner_proc_unlock(proc);
5599}
5600
5601void binders_in_transcation(int uid)
5602{
5603 struct binder_proc *itr;
5604
5605 mutex_lock(&binder_procs_lock);
5606 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5607 if (itr->tsk->cred->euid.val == uid) {
5608 binder_in_transaction(itr);
5609 }
5610 }
5611 mutex_unlock(&binder_procs_lock);
5612}
5613#endif
5614
167bccbd 5615static const char * const binder_return_strings[] = {
355b0502
GKH
5616 "BR_ERROR",
5617 "BR_OK",
5618 "BR_TRANSACTION",
5619 "BR_REPLY",
5620 "BR_ACQUIRE_RESULT",
5621 "BR_DEAD_REPLY",
5622 "BR_TRANSACTION_COMPLETE",
5623 "BR_INCREFS",
5624 "BR_ACQUIRE",
5625 "BR_RELEASE",
5626 "BR_DECREFS",
5627 "BR_ATTEMPT_ACQUIRE",
5628 "BR_NOOP",
5629 "BR_SPAWN_LOOPER",
5630 "BR_FINISHED",
5631 "BR_DEAD_BINDER",
5632 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5633 "BR_FAILED_REPLY"
5634};
5635
167bccbd 5636static const char * const binder_command_strings[] = {
355b0502
GKH
5637 "BC_TRANSACTION",
5638 "BC_REPLY",
5639 "BC_ACQUIRE_RESULT",
5640 "BC_FREE_BUFFER",
5641 "BC_INCREFS",
5642 "BC_ACQUIRE",
5643 "BC_RELEASE",
5644 "BC_DECREFS",
5645 "BC_INCREFS_DONE",
5646 "BC_ACQUIRE_DONE",
5647 "BC_ATTEMPT_ACQUIRE",
5648 "BC_REGISTER_LOOPER",
5649 "BC_ENTER_LOOPER",
5650 "BC_EXIT_LOOPER",
5651 "BC_REQUEST_DEATH_NOTIFICATION",
5652 "BC_CLEAR_DEATH_NOTIFICATION",
dd9bc4f9
MC
5653 "BC_DEAD_BINDER_DONE",
5654 "BC_TRANSACTION_SG",
5655 "BC_REPLY_SG",
355b0502
GKH
5656};
5657
167bccbd 5658static const char * const binder_objstat_strings[] = {
355b0502
GKH
5659 "proc",
5660 "thread",
5661 "node",
5662 "ref",
5663 "death",
5664 "transaction",
5665 "transaction_complete"
5666};
5667
5249f488 5668static void print_binder_stats(struct seq_file *m, const char *prefix,
ec49bb00 5669 struct binder_stats *stats)
355b0502
GKH
5670{
5671 int i;
5672
5673 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5249f488 5674 ARRAY_SIZE(binder_command_strings));
355b0502 5675 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
f716ecfc
BJS
5676 int temp = atomic_read(&stats->bc[i]);
5677
5678 if (temp)
5249f488 5679 seq_printf(m, "%s%s: %d\n", prefix,
f716ecfc 5680 binder_command_strings[i], temp);
355b0502
GKH
5681 }
5682
5683 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5249f488 5684 ARRAY_SIZE(binder_return_strings));
355b0502 5685 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
f716ecfc
BJS
5686 int temp = atomic_read(&stats->br[i]);
5687
5688 if (temp)
5249f488 5689 seq_printf(m, "%s%s: %d\n", prefix,
f716ecfc 5690 binder_return_strings[i], temp);
355b0502
GKH
5691 }
5692
ec49bb00 5693 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5249f488 5694 ARRAY_SIZE(binder_objstat_strings));
ec49bb00
TK
5695 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5696 ARRAY_SIZE(stats->obj_deleted));
5697 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
f716ecfc
BJS
5698 int created = atomic_read(&stats->obj_created[i]);
5699 int deleted = atomic_read(&stats->obj_deleted[i]);
5700
5701 if (created || deleted)
5702 seq_printf(m, "%s%s: active %d total %d\n",
5703 prefix,
ec49bb00 5704 binder_objstat_strings[i],
f716ecfc
BJS
5705 created - deleted,
5706 created);
355b0502 5707 }
467545d8
TK
5708}
5709
5249f488
AH
5710static void print_binder_proc_stats(struct seq_file *m,
5711 struct binder_proc *proc)
355b0502
GKH
5712{
5713 struct binder_work *w;
c9cd6356 5714 struct binder_thread *thread;
355b0502 5715 struct rb_node *n;
c9cd6356 5716 int count, strong, weak, ready_threads;
e4951233
TK
5717 size_t free_async_space =
5718 binder_alloc_get_free_async_space(&proc->alloc);
355b0502 5719
5249f488 5720 seq_printf(m, "proc %d\n", proc->pid);
8b980bee 5721 seq_printf(m, "context %s\n", proc->context->name);
355b0502 5722 count = 0;
c9cd6356 5723 ready_threads = 0;
e4951233 5724 binder_inner_proc_lock(proc);
355b0502
GKH
5725 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5726 count++;
c9cd6356
MC
5727
5728 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5729 ready_threads++;
5730
5249f488
AH
5731 seq_printf(m, " threads: %d\n", count);
5732 seq_printf(m, " requested threads: %d+%d/%d\n"
355b0502
GKH
5733 " ready threads %d\n"
5734 " free async space %zd\n", proc->requested_threads,
5735 proc->requested_threads_started, proc->max_threads,
c9cd6356 5736 ready_threads,
e4951233 5737 free_async_space);
355b0502
GKH
5738 count = 0;
5739 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5740 count++;
46655970 5741 binder_inner_proc_unlock(proc);
5249f488 5742 seq_printf(m, " nodes: %d\n", count);
355b0502
GKH
5743 count = 0;
5744 strong = 0;
5745 weak = 0;
6fcb2b9a 5746 binder_proc_lock(proc);
355b0502
GKH
5747 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5748 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5749 rb_node_desc);
5750 count++;
f7d87412
TK
5751 strong += ref->data.strong;
5752 weak += ref->data.weak;
355b0502 5753 }
6fcb2b9a 5754 binder_proc_unlock(proc);
5249f488 5755 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
355b0502 5756
467545d8 5757 count = binder_alloc_get_allocated_count(&proc->alloc);
5249f488 5758 seq_printf(m, " buffers: %d\n", count);
355b0502 5759
798dfdd8
SY
5760 binder_alloc_print_pages(m, &proc->alloc);
5761
355b0502 5762 count = 0;
57628830 5763 binder_inner_proc_lock(proc);
355b0502 5764 list_for_each_entry(w, &proc->todo, entry) {
57628830 5765 if (w->type == BINDER_WORK_TRANSACTION)
355b0502 5766 count++;
355b0502 5767 }
57628830 5768 binder_inner_proc_unlock(proc);
5249f488 5769 seq_printf(m, " pending transactions: %d\n", count);
355b0502 5770
ec49bb00 5771 print_binder_stats(m, " ", &proc->stats);
355b0502
GKH
5772}
5773
5774
5249f488 5775static int binder_state_show(struct seq_file *m, void *unused)
355b0502
GKH
5776{
5777 struct binder_proc *proc;
355b0502 5778 struct binder_node *node;
14c312e9 5779 struct binder_node *last_node = NULL;
355b0502 5780
ec49bb00 5781 seq_puts(m, "binder state:\n");
355b0502 5782
3490fdcb 5783 spin_lock(&binder_dead_nodes_lock);
ec49bb00
TK
5784 if (!hlist_empty(&binder_dead_nodes))
5785 seq_puts(m, "dead nodes:\n");
14c312e9
TK
5786 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5787 /*
5788 * take a temporary reference on the node so it
5789 * survives and isn't removed from the list
5790 * while we print it.
5791 */
5792 node->tmp_refs++;
5793 spin_unlock(&binder_dead_nodes_lock);
5794 if (last_node)
5795 binder_put_node(last_node);
5796 binder_node_lock(node);
46655970 5797 print_binder_node_nilocked(m, node);
14c312e9
TK
5798 binder_node_unlock(node);
5799 last_node = node;
5800 spin_lock(&binder_dead_nodes_lock);
5801 }
3490fdcb 5802 spin_unlock(&binder_dead_nodes_lock);
14c312e9
TK
5803 if (last_node)
5804 binder_put_node(last_node);
d6bbb327 5805
3490fdcb 5806 mutex_lock(&binder_procs_lock);
ec49bb00
TK
5807 hlist_for_each_entry(proc, &binder_procs, proc_node)
5808 print_binder_proc(m, proc, 1);
3490fdcb 5809 mutex_unlock(&binder_procs_lock);
8881f118 5810
5249f488 5811 return 0;
355b0502
GKH
5812}
5813
5249f488 5814static int binder_stats_show(struct seq_file *m, void *unused)
355b0502
GKH
5815{
5816 struct binder_proc *proc;
355b0502 5817
5249f488 5818 seq_puts(m, "binder stats:\n");
355b0502 5819
ec49bb00 5820 print_binder_stats(m, "", &binder_stats);
355b0502 5821
3490fdcb 5822 mutex_lock(&binder_procs_lock);
ec49bb00
TK
5823 hlist_for_each_entry(proc, &binder_procs, proc_node)
5824 print_binder_proc_stats(m, proc);
3490fdcb 5825 mutex_unlock(&binder_procs_lock);
8881f118 5826
5249f488 5827 return 0;
355b0502
GKH
5828}
5829
5249f488 5830static int binder_transactions_show(struct seq_file *m, void *unused)
355b0502
GKH
5831{
5832 struct binder_proc *proc;
355b0502 5833
ec49bb00 5834 seq_puts(m, "binder transactions:\n");
3490fdcb 5835 mutex_lock(&binder_procs_lock);
ec49bb00
TK
5836 hlist_for_each_entry(proc, &binder_procs, proc_node)
5837 print_binder_proc(m, proc, 0);
3490fdcb 5838 mutex_unlock(&binder_procs_lock);
8881f118 5839
5249f488 5840 return 0;
355b0502
GKH
5841}
5842
5249f488 5843static int binder_proc_show(struct seq_file *m, void *unused)
355b0502 5844{
aa29c32d 5845 struct binder_proc *itr;
8b980bee 5846 int pid = (unsigned long)m->private;
355b0502 5847
3490fdcb 5848 mutex_lock(&binder_procs_lock);
ec49bb00
TK
5849 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5850 if (itr->pid == pid) {
5851 seq_puts(m, "binder proc state:\n");
5852 print_binder_proc(m, itr, 1);
aa29c32d
RA
5853 }
5854 }
3490fdcb
TK
5855 mutex_unlock(&binder_procs_lock);
5856
5249f488 5857 return 0;
355b0502
GKH
5858}
5859
5249f488 5860static void print_binder_transaction_log_entry(struct seq_file *m,
355b0502
GKH
5861 struct binder_transaction_log_entry *e)
5862{
0f32aeb3
TK
5863 int debug_id = READ_ONCE(e->debug_id_done);
5864 /*
5865 * read barrier to guarantee debug_id_done read before
5866 * we print the log values
5867 */
5868 smp_rmb();
5249f488 5869 seq_printf(m,
0f32aeb3 5870 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5249f488
AH
5871 e->debug_id, (e->call_type == 2) ? "reply" :
5872 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
8b980bee 5873 e->from_thread, e->to_proc, e->to_thread, e->context_name,
0a0fdc1f
TK
5874 e->to_node, e->target_handle, e->data_size, e->offsets_size,
5875 e->return_error, e->return_error_param,
5876 e->return_error_line);
0f32aeb3
TK
5877 /*
5878 * read-barrier to guarantee read of debug_id_done after
5879 * done printing the fields of the entry
5880 */
5881 smp_rmb();
5882 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5883 "\n" : " (incomplete)\n");
355b0502
GKH
5884}
5885
ec49bb00 5886static int binder_transaction_log_show(struct seq_file *m, void *unused)
355b0502 5887{
ec49bb00 5888 struct binder_transaction_log *log = m->private;
0f32aeb3
TK
5889 unsigned int log_cur = atomic_read(&log->cur);
5890 unsigned int count;
5891 unsigned int cur;
355b0502 5892 int i;
ec49bb00 5893
0f32aeb3
TK
5894 count = log_cur + 1;
5895 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5896 0 : count % ARRAY_SIZE(log->entry);
5897 if (count > ARRAY_SIZE(log->entry) || log->full)
5898 count = ARRAY_SIZE(log->entry);
5899 for (i = 0; i < count; i++) {
5900 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5901
5902 print_binder_transaction_log_entry(m, &log->entry[index]);
355b0502 5903 }
5249f488 5904 return 0;
355b0502
GKH
5905}
5906
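/*
 * Editorial note: a worked example of the ring arithmetic above,
 * assuming the default 32-entry log. After 5 adds, log->cur is 4, so
 * count = 5, the log is not yet full, and entries 0..4 print in order.
 * After 40 adds the log is full: count is clamped to 32 and cur starts
 * at 40 % 32 = 8, so printing begins at the oldest slot (8) and wraps
 * around through slot 7.
 */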
5907static const struct file_operations binder_fops = {
5908 .owner = THIS_MODULE,
5909 .poll = binder_poll,
5910 .unlocked_ioctl = binder_ioctl,
da49889d 5911 .compat_ioctl = binder_ioctl,
355b0502
GKH
5912 .mmap = binder_mmap,
5913 .open = binder_open,
5914 .flush = binder_flush,
5915 .release = binder_release,
5916};
5917
5249f488
AH
5918BINDER_DEBUG_ENTRY(state);
5919BINDER_DEBUG_ENTRY(stats);
5920BINDER_DEBUG_ENTRY(transactions);
5921BINDER_DEBUG_ENTRY(transaction_log);
5922
04e3812e
MC
5923static int __init init_binder_device(const char *name)
5924{
5925 int ret;
5926 struct binder_device *binder_device;
5927
5928 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
5929 if (!binder_device)
5930 return -ENOMEM;
5931
5932 binder_device->miscdev.fops = &binder_fops;
5933 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
5934 binder_device->miscdev.name = name;
5935
ec49bb00
TK
5936 binder_device->context.binder_context_mgr_uid = INVALID_UID;
5937 binder_device->context.name = name;
3490fdcb 5938 mutex_init(&binder_device->context.context_mgr_node_lock);
04e3812e
MC
5939
5940 ret = misc_register(&binder_device->miscdev);
5941 if (ret < 0) {
ec49bb00
TK
5942 kfree(binder_device);
5943 return ret;
04e3812e
MC
5944 }
5945
5946 hlist_add_head(&binder_device->hlist, &binder_devices);
5947
5948 return ret;
5949}
5950
355b0502
GKH
5951static int __init binder_init(void)
5952{
ec49bb00 5953 int ret;
04e3812e
MC
5954 char *device_name, *device_names;
5955 struct binder_device *device;
5956 struct hlist_node *tmp;
355b0502 5957
5a068558
MB
5958 ret = binder_alloc_shrinker_init();
5959 if (ret)
5960 return ret;
f73e8e76 5961
0f32aeb3
TK
5962 atomic_set(&binder_transaction_log.cur, ~0U);
5963 atomic_set(&binder_transaction_log_failed.cur, ~0U);
3c762a49 5964
16b66554
AH
5965 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
5966 if (binder_debugfs_dir_entry_root)
5967 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
5968 binder_debugfs_dir_entry_root);
04e3812e 5969
16b66554
AH
5970 if (binder_debugfs_dir_entry_root) {
5971 debugfs_create_file("state",
5a068558 5972 0444,
16b66554
AH
5973 binder_debugfs_dir_entry_root,
5974 NULL,
5975 &binder_state_fops);
5976 debugfs_create_file("stats",
5a068558 5977 0444,
16b66554
AH
5978 binder_debugfs_dir_entry_root,
5979 NULL,
5980 &binder_stats_fops);
5981 debugfs_create_file("transactions",
5a068558 5982 0444,
16b66554
AH
5983 binder_debugfs_dir_entry_root,
5984 NULL,
5985 &binder_transactions_fops);
5986 debugfs_create_file("transaction_log",
5a068558 5987 0444,
16b66554 5988 binder_debugfs_dir_entry_root,
ec49bb00 5989 &binder_transaction_log,
16b66554
AH
5990 &binder_transaction_log_fops);
5991 debugfs_create_file("failed_transaction_log",
5a068558 5992 0444,
16b66554 5993 binder_debugfs_dir_entry_root,
ec49bb00
TK
5994 &binder_transaction_log_failed,
5995 &binder_transaction_log_fops);
5996 }
5997
5998 /*
5999 * Copy the module_parameter string, because we don't want to
6000 * tokenize it in-place.
6001 */
6002 device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
6003 if (!device_names) {
6004 ret = -ENOMEM;
6005 goto err_alloc_device_names_failed;
6006 }
6007 strcpy(device_names, binder_devices_param);
6008
6009 while ((device_name = strsep(&device_names, ","))) {
6010 ret = init_binder_device(device_name);
6011 if (ret)
6012 goto err_init_binder_device_failed;
04e3812e
MC
6013 }
6014
6015 return ret;
6016
6017err_init_binder_device_failed:
6018 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6019 misc_deregister(&device->miscdev);
6020 hlist_del(&device->hlist);
ec49bb00 6021 kfree(device);
04e3812e 6022 }
ec49bb00
TK
6023err_alloc_device_names_failed:
6024 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6025
355b0502
GKH
6026 return ret;
6027}
6028
6029device_initcall(binder_init);
6030
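/*
 * Editorial note: the debugfs files registered in binder_init() expose
 * the print helpers above. Assuming debugfs is mounted in the usual
 * place, the driver can be inspected with, for example:
 *
 *	cat /sys/kernel/debug/binder/state
 *	cat /sys/kernel/debug/binder/stats
 *	cat /sys/kernel/debug/binder/transactions
 *	cat /sys/kernel/debug/binder/transaction_log
 *	cat /sys/kernel/debug/binder/failed_transaction_log
 *	cat /sys/kernel/debug/binder/proc/<pid>
 */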
975a1ac9
AH
6031#define CREATE_TRACE_POINTS
6032#include "binder_trace.h"
6033
355b0502 6034MODULE_LICENSE("GPL v2");