binder: fix possible UAF when freeing buffer
[GitHub/MotorolaMobilityLLC/kernel-slsi.git]
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 12ec925c954e91b5280112cd1f1cbc60007cf411..b02d183f57a433660d9c99435a450dfc60e16899 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -527,7 +527,8 @@ struct binder_priority {
  * @requested_threads_started: number binder threads started
  *                        (protected by @inner_lock)
  * @tmp_ref:              temporary reference to indicate proc is in use
- *                        (protected by @inner_lock)
+ *                        (atomic since @proc->inner_lock cannot
+ *                        always be acquired)
  * @default_priority:     default scheduler priority
  *                        (invariant after initialized)
  * @debugfs_entry:        debugfs node
@@ -561,7 +562,7 @@ struct binder_proc {
        int max_threads;
        int requested_threads;
        int requested_threads_started;
-       int tmp_ref;
+       atomic_t tmp_ref;
        struct binder_priority default_priority;
        struct dentry *debugfs_entry;
        struct binder_alloc alloc;
@@ -2143,9 +2144,9 @@ static void binder_thread_dec_tmpref(struct binder_thread *thread)
 static void binder_proc_dec_tmpref(struct binder_proc *proc)
 {
        binder_inner_proc_lock(proc);
-       proc->tmp_ref--;
+       atomic_dec(&proc->tmp_ref);
        if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
-                       !proc->tmp_ref) {
+                       !atomic_read(&proc->tmp_ref)) {
                binder_inner_proc_unlock(proc);
                binder_free_proc(proc);
                return;
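
The two hunks above are the bookkeeping change behind the fix: tmp_ref becomes an atomic_t because, as the updated kerneldoc says, it is sometimes taken in contexts where @proc->inner_lock cannot be acquired first (see binder_free_transaction() below), while the put side still runs under the inner lock so the dead-and-unreferenced check cannot race with the free. Below is a minimal user-space sketch of that pattern, not the driver code: the names (fake_proc, proc_get_tmpref, proc_put_tmpref) are invented, and C11 atomics plus a pthread mutex stand in for the kernel's atomic_t and spinlock.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>
#include <pthread.h>

struct fake_proc {
	pthread_mutex_t inner_lock;	/* models @proc->inner_lock */
	atomic_int tmp_ref;		/* models the new atomic_t tmp_ref */
	bool is_dead;
};

/* The "get" side may run without inner_lock held, hence the atomic. */
void proc_get_tmpref(struct fake_proc *proc)
{
	atomic_fetch_add(&proc->tmp_ref, 1);
}

/* The "put" side mirrors binder_proc_dec_tmpref(): drop the reference
 * and, under the inner lock, free a dead proc nobody references. */
void proc_put_tmpref(struct fake_proc *proc)
{
	pthread_mutex_lock(&proc->inner_lock);
	atomic_fetch_sub(&proc->tmp_ref, 1);
	if (proc->is_dead && atomic_load(&proc->tmp_ref) == 0) {
		pthread_mutex_unlock(&proc->inner_lock);
		pthread_mutex_destroy(&proc->inner_lock);
		free(proc);
		return;
	}
	pthread_mutex_unlock(&proc->inner_lock);
}

int main(void)
{
	struct fake_proc *proc = calloc(1, sizeof(*proc));

	if (!proc)
		return 1;
	pthread_mutex_init(&proc->inner_lock, NULL);
	atomic_init(&proc->tmp_ref, 0);

	proc_get_tmpref(proc);		/* e.g. a transaction pins the proc */

	pthread_mutex_lock(&proc->inner_lock);
	proc->is_dead = true;		/* teardown marks it dead */
	pthread_mutex_unlock(&proc->inner_lock);

	proc_put_tmpref(proc);		/* last user: this put frees it */
	return 0;
}

The driver's check additionally requires RB_EMPTY_ROOT(&proc->threads); the model drops that detail to stay short.
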
@@ -2207,18 +2208,26 @@ static struct binder_thread *binder_get_txn_from_and_acq_inner(
 
 static void binder_free_transaction(struct binder_transaction *t)
 {
-       struct binder_proc *target_proc = t->to_proc;
+       struct binder_proc *target_proc;
 
+       spin_lock(&t->lock);
+       target_proc = t->to_proc;
        if (target_proc) {
+               atomic_inc(&target_proc->tmp_ref);
+               spin_unlock(&t->lock);
+
                binder_inner_proc_lock(target_proc);
                if (t->buffer)
                        t->buffer->transaction = NULL;
                binder_inner_proc_unlock(target_proc);
+               binder_proc_dec_tmpref(target_proc);
+       } else {
+               /*
+                * If the transaction has no target_proc, then
+                * t->buffer->transaction has already been cleared.
+                */
+               spin_unlock(&t->lock);
        }
-       /*
-        * If the transaction has no target_proc, then
-        * t->buffer->transaction has already been cleared.
-        */
        kfree(t);
        binder_stats_deleted(BINDER_STAT_TRANSACTION);
 }
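
This hunk is the actual UAF fix: t->to_proc may be cleared, and the target proc freed, by another thread, so it is now only read under t->lock, and the proc is pinned with a temporary reference before that lock is dropped; only then is it safe to take the proc's inner lock and clear t->buffer->transaction. A rough, self-contained user-space model of that ordering follows; the fake_* names are invented, and the inline atomic ops stand in for the tmp_ref helpers.

#include <stdatomic.h>
#include <stddef.h>
#include <stdlib.h>
#include <pthread.h>

struct fake_proc {
	pthread_mutex_t inner_lock;	/* models binder_inner_proc_lock() */
	atomic_int tmp_ref;
};

struct fake_buffer {
	struct fake_txn *transaction;
};

struct fake_txn {
	pthread_mutex_t lock;		/* models t->lock */
	struct fake_proc *to_proc;	/* may be cleared concurrently */
	struct fake_buffer *buffer;
};

void free_txn(struct fake_txn *t)
{
	struct fake_proc *target_proc;

	pthread_mutex_lock(&t->lock);
	target_proc = t->to_proc;
	if (target_proc) {
		/* Pin the proc before dropping t->lock so it cannot go away. */
		atomic_fetch_add(&target_proc->tmp_ref, 1);
		pthread_mutex_unlock(&t->lock);

		pthread_mutex_lock(&target_proc->inner_lock);
		if (t->buffer)
			t->buffer->transaction = NULL;
		pthread_mutex_unlock(&target_proc->inner_lock);

		/* The driver calls binder_proc_dec_tmpref() here, which may
		 * also free a dead proc; the model only drops the count. */
		atomic_fetch_sub(&target_proc->tmp_ref, 1);
	} else {
		/* No target proc: buffer->transaction was already cleared. */
		pthread_mutex_unlock(&t->lock);
	}
	free(t);			/* mirrors kfree(t) */
}

Compared with the removed code, the important differences are that to_proc is never dereferenced outside t->lock and that the inner lock is only ever taken on a pinned proc.
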
@@ -2973,7 +2982,7 @@ static struct binder_node *binder_get_node_refs_for_txn(
                target_node = node;
                binder_inc_node_nilocked(node, 1, 0, NULL);
                binder_inc_node_tmpref_ilocked(node);
-               node->proc->tmp_ref++;
+               atomic_inc(&node->proc->tmp_ref);
                *procp = node->proc;
        } else
                *error = BR_DEAD_REPLY;
@@ -3075,7 +3084,7 @@ static void binder_transaction(struct binder_proc *proc,
                        goto err_dead_binder;
                }
                target_proc = target_thread->proc;
-               target_proc->tmp_ref++;
+               atomic_inc(&target_proc->tmp_ref);
                binder_inner_proc_unlock(target_thread->proc);
        } else {
                if (tr->target.handle) {
@@ -4705,7 +4714,7 @@ static int binder_thread_release(struct binder_proc *proc,
         * The corresponding dec is when we actually
         * free the thread in binder_free_thread()
         */
-       proc->tmp_ref++;
+       atomic_inc(&proc->tmp_ref);
        /*
         * take a ref on this thread to ensure it
         * survives while we are releasing it
@@ -5150,6 +5159,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
                return -ENOMEM;
        spin_lock_init(&proc->inner_lock);
        spin_lock_init(&proc->outer_lock);
+       atomic_set(&proc->tmp_ref, 0);
        get_task_struct(current->group_leader);
        proc->tsk = current->group_leader;
        mutex_init(&proc->files_lock);
@@ -5329,7 +5339,7 @@ static void binder_deferred_release(struct binder_proc *proc)
         * Make sure proc stays alive after we
         * remove all the threads
         */
-       proc->tmp_ref++;
+       atomic_inc(&proc->tmp_ref);
 
        proc->is_dead = true;
        threads = 0;
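
The last two hunks cover the proc's own lifecycle: binder_open() now starts the counter at zero with atomic_set(), and the release path takes a reference on the proc before marking it dead and tearing its threads down, so the structure is only freed by whoever drops the last temporary reference. A compressed user-space sketch of that lifecycle, with the same caveats as the earlier models (invented names; the put helper is repeated so the sketch stands alone):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>
#include <pthread.h>

struct fake_proc {
	pthread_mutex_t inner_lock;
	atomic_int tmp_ref;
	bool is_dead;
};

/* "open": the counter starts at zero, mirroring the new atomic_set(). */
struct fake_proc *proc_open(void)
{
	struct fake_proc *proc = calloc(1, sizeof(*proc));

	if (!proc)
		return NULL;
	pthread_mutex_init(&proc->inner_lock, NULL);
	atomic_init(&proc->tmp_ref, 0);
	return proc;
}

/* "put": free a dead proc once the last temporary reference is gone. */
void proc_put_tmpref(struct fake_proc *proc)
{
	pthread_mutex_lock(&proc->inner_lock);
	atomic_fetch_sub(&proc->tmp_ref, 1);
	if (proc->is_dead && atomic_load(&proc->tmp_ref) == 0) {
		pthread_mutex_unlock(&proc->inner_lock);
		pthread_mutex_destroy(&proc->inner_lock);
		free(proc);
		return;
	}
	pthread_mutex_unlock(&proc->inner_lock);
}

/* "release": pin the proc across teardown, as binder_deferred_release()
 * and binder_thread_release() do, then drop that self-reference. */
void proc_release(struct fake_proc *proc)
{
	atomic_fetch_add(&proc->tmp_ref, 1);

	pthread_mutex_lock(&proc->inner_lock);
	proc->is_dead = true;
	/* ... threads, nodes and refs are torn down here ... */
	pthread_mutex_unlock(&proc->inner_lock);

	proc_put_tmpref(proc);		/* frees proc if nobody else holds it */
}
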