binder: fix possible UAF when freeing buffer pie-9.0.0-release-psa MMI-PSAS29.160-55-7-1
author Jignesh Patel <jignesh@motorola.com>
Fri, 15 Nov 2019 04:43:01 +0000 (10:13 +0530)
committer chenyt9 <chenyt9@lenovo.com>
Wed, 17 Jun 2020 06:46:27 +0000 (14:46 +0800)
There is a race between the binder driver cleaning
up a completed transaction via binder_free_transaction()
and a user calling binder_ioctl(BC_FREE_BUFFER) to
release a buffer. It doesn't matter which runs first, but
they need to be protected against running concurrently,
which can otherwise result in a use-after-free (UAF).

Mot-CRs-fixed: (CR)
CVE-Fixed: CVE-2019-2213
Bug: 133758011

The following revert is needed to apply the correct patch, hence
reverting
Revert "[RAMEN9610-20513]binder: fix possible UAF when freeing buffer"

This reverts commit e114db7c075820499ae09168cbc5b14786a552b8.

Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Jignesh Patel <jignesh@motorola.com>
Change-Id: Ife23f7a2178678252a2f68c6a64c0800a621110f
Reviewed-on: https://gerrit.mot.com/1434961
SME-Granted: SME Approvals Granted
SLTApproved: Slta Waiver
Tested-by: Jira Key
Reviewed-by: Xiangpo Zhao <zhaoxp3@motorola.com>
Submit-Approved: Jira Key
Reviewed-on: https://gerrit.mot.com/1456141
Reviewed-by: Varun Shrivastava <varunshrivastava@motorola.com>
Reviewed-by: Sindhu C <a12924@motorola.com>
SLTApproved: Sindhu C <a12924@motorola.com>
Tested-by: Anandappan ChakRavarthy <pjwt34@motorola.com>
drivers/android/binder.c

index 12ec925c954e91b5280112cd1f1cbc60007cf411..b02d183f57a433660d9c99435a450dfc60e16899 100644 (file)
@@ -527,7 +527,8 @@ struct binder_priority {
  * @requested_threads_started: number binder threads started
  *                        (protected by @inner_lock)
  * @tmp_ref:              temporary reference to indicate proc is in use
- *                        (protected by @inner_lock)
+ *                        (atomic since @proc->inner_lock cannot
+ *                        always be acquired)
  * @default_priority:     default scheduler priority
  *                        (invariant after initialized)
  * @debugfs_entry:        debugfs node
@@ -561,7 +562,7 @@ struct binder_proc {
        int max_threads;
        int requested_threads;
        int requested_threads_started;
-       int tmp_ref;
+       atomic_t tmp_ref;
        struct binder_priority default_priority;
        struct dentry *debugfs_entry;
        struct binder_alloc alloc;
@@ -2143,9 +2144,9 @@ static void binder_thread_dec_tmpref(struct binder_thread *thread)
 static void binder_proc_dec_tmpref(struct binder_proc *proc)
 {
        binder_inner_proc_lock(proc);
-       proc->tmp_ref--;
+       atomic_dec(&proc->tmp_ref);
        if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
-                       !proc->tmp_ref) {
+                       !atomic_read(&proc->tmp_ref)) {
                binder_inner_proc_unlock(proc);
                binder_free_proc(proc);
                return;
@@ -2207,18 +2208,26 @@ static struct binder_thread *binder_get_txn_from_and_acq_inner(
 
 static void binder_free_transaction(struct binder_transaction *t)
 {
-       struct binder_proc *target_proc = t->to_proc;
+       struct binder_proc *target_proc;
 
+       spin_lock(&t->lock);
+       target_proc = t->to_proc;
        if (target_proc) {
+               atomic_inc(&target_proc->tmp_ref);
+               spin_unlock(&t->lock);
+
                binder_inner_proc_lock(target_proc);
                if (t->buffer)
                        t->buffer->transaction = NULL;
                binder_inner_proc_unlock(target_proc);
+               binder_proc_dec_tmpref(target_proc);
+       } else {
+               /*
+                * If the transaction has no target_proc, then
+                * t->buffer->transaction has already been cleared.
+                */
+               spin_unlock(&t->lock);
        }
-       /*
-        * If the transaction has no target_proc, then
-        * t->buffer->transaction has already been cleared.
-        */
        kfree(t);
        binder_stats_deleted(BINDER_STAT_TRANSACTION);
 }
@@ -2973,7 +2982,7 @@ static struct binder_node *binder_get_node_refs_for_txn(
                target_node = node;
                binder_inc_node_nilocked(node, 1, 0, NULL);
                binder_inc_node_tmpref_ilocked(node);
-               node->proc->tmp_ref++;
+               atomic_inc(&node->proc->tmp_ref);
                *procp = node->proc;
        } else
                *error = BR_DEAD_REPLY;
@@ -3075,7 +3084,7 @@ static void binder_transaction(struct binder_proc *proc,
                        goto err_dead_binder;
                }
                target_proc = target_thread->proc;
-               target_proc->tmp_ref++;
+               atomic_inc(&target_proc->tmp_ref);
                binder_inner_proc_unlock(target_thread->proc);
        } else {
                if (tr->target.handle) {
@@ -4705,7 +4714,7 @@ static int binder_thread_release(struct binder_proc *proc,
         * The corresponding dec is when we actually
         * free the thread in binder_free_thread()
         */
-       proc->tmp_ref++;
+       atomic_inc(&proc->tmp_ref);
        /*
         * take a ref on this thread to ensure it
         * survives while we are releasing it
@@ -5150,6 +5159,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
                return -ENOMEM;
        spin_lock_init(&proc->inner_lock);
        spin_lock_init(&proc->outer_lock);
+       atomic_set(&proc->tmp_ref, 0);
        get_task_struct(current->group_leader);
        proc->tsk = current->group_leader;
        mutex_init(&proc->files_lock);
@@ -5329,7 +5339,7 @@ static void binder_deferred_release(struct binder_proc *proc)
         * Make sure proc stays alive after we
         * remove all the threads
         */
-       proc->tmp_ref++;
+       atomic_inc(&proc->tmp_ref);
 
        proc->is_dead = true;
        threads = 0;