android: binder: Disable preemption while holding the global binder lock.
author     Riley Andrews <riandrews@google.com>
           Wed, 21 Oct 2015 23:30:51 +0000 (16:30 -0700)
committer  Stricted <info@stricted.net>
           Thu, 11 Oct 2018 16:12:57 +0000 (18:12 +0200)
(cherry pick from commit f681f0264fd2c51aa12190ff9be04622a7c8ca3f)

Signed-off-by: Riley Andrews <riandrews@google.com>
Bug: 30141999
Change-Id: I66ed44990d5c347d197e61dc49a37b5228c748d0

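The patch keeps preemption disabled for the entire time binder_main_lock is held, and wraps every operation under the lock that may sleep (GFP_KERNEL allocations, copy_to_user/copy_from_user, get_user/put_user, and the mmap_sem work in binder_update_page_range) so that preemption is briefly re-enabled around the sleeping call and re-disabled afterwards. Below is a minimal sketch of that wrapper pattern; the example_* names are hypothetical stand-ins for the binder_* helpers the diff adds, not code from the driver itself.

	/*
	 * Illustrative sketch only: hold a global mutex with preemption
	 * disabled, and bracket any potentially sleeping operation with
	 * preempt_enable_no_resched()/preempt_disable(), mirroring the
	 * helpers introduced by this patch.
	 */
	#include <linux/mutex.h>
	#include <linux/preempt.h>
	#include <linux/slab.h>
	#include <linux/uaccess.h>

	static DEFINE_MUTEX(example_main_lock);

	static inline void example_lock(void)
	{
		mutex_lock(&example_main_lock);
		preempt_disable();		/* stay non-preemptible while the lock is held */
	}

	static inline void example_unlock(void)
	{
		mutex_unlock(&example_main_lock);
		preempt_enable();
	}

	/* Allocate under the lock: try atomically first, sleep only if needed. */
	static inline void *example_kzalloc(size_t size)
	{
		void *ptr = kzalloc(size, GFP_NOWAIT);

		if (ptr)
			return ptr;

		preempt_enable_no_resched();	/* GFP_KERNEL allocation may sleep */
		ptr = kzalloc(size, GFP_KERNEL);
		preempt_disable();

		return ptr;
	}

	/* User copies may fault and sleep, so bracket them the same way. */
	static inline long example_copy_from_user(void *to, const void __user *from,
						  long n)
	{
		long ret;

		preempt_enable_no_resched();
		ret = copy_from_user(to, from, n);
		preempt_disable();
		return ret;
	}

The same bracketing is applied in the diff to copy_to_user, get_user, and put_user via small wrapper macros, and binder_update_page_range re-enables preemption around its down_write(&mm->mmap_sem) section for the same reason.
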
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index deb932dea529f01467334e677a23c7d2021d5647..8326482981abfe13c8853a4012873fc6c46cd301 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -435,6 +435,7 @@ static inline void binder_lock(const char *tag)
 {
        trace_binder_lock(tag);
        mutex_lock(&binder_main_lock);
+       preempt_disable();
        trace_binder_locked(tag);
 }
 
@@ -442,8 +443,62 @@ static inline void binder_unlock(const char *tag)
 {
        trace_binder_unlock(tag);
        mutex_unlock(&binder_main_lock);
+       preempt_enable();
 }
 
+static inline void *kzalloc_preempt_disabled(size_t size)
+{
+       void *ptr;
+
+       ptr = kzalloc(size, GFP_NOWAIT);
+       if (ptr)
+               return ptr;
+
+       preempt_enable_no_resched();
+       ptr = kzalloc(size, GFP_KERNEL);
+       preempt_disable();
+
+       return ptr;
+}
+
+static inline long copy_to_user_preempt_disabled(void __user *to, const void *from, long n)
+{
+       long ret;
+
+       preempt_enable_no_resched();
+       ret = copy_to_user(to, from, n);
+       preempt_disable();
+       return ret;
+}
+
+static inline long copy_from_user_preempt_disabled(void *to, const void __user *from, long n)
+{
+       long ret;
+
+       preempt_enable_no_resched();
+       ret = copy_from_user(to, from, n);
+       preempt_disable();
+       return ret;
+}
+
+#define get_user_preempt_disabled(x, ptr)      \
+({                                             \
+       int __ret;                              \
+       preempt_enable_no_resched();            \
+       __ret = get_user(x, ptr);               \
+       preempt_disable();                      \
+       __ret;                                  \
+})
+
+#define put_user_preempt_disabled(x, ptr)      \
+({                                             \
+       int __ret;                              \
+       preempt_enable_no_resched();            \
+       __ret = put_user(x, ptr);               \
+       preempt_disable();                      \
+       __ret;                                  \
+})
+
 static void binder_set_nice(long nice)
 {
        long min_nice;
@@ -577,6 +632,8 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
        else
                mm = get_task_mm(proc->tsk);
 
+       preempt_enable_no_resched();
+
        if (mm) {
                down_write(&mm->mmap_sem);
                vma = proc->vma;
@@ -630,6 +687,9 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
                up_write(&mm->mmap_sem);
                mmput(mm);
        }
+
+       preempt_disable();
+
        return 0;
 
 free_range:
@@ -652,6 +712,9 @@ err_no_vma:
                up_write(&mm->mmap_sem);
                mmput(mm);
        }
+
+       preempt_disable();
+
        return -ENOMEM;
 }
 
@@ -911,7 +974,7 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
                        return NULL;
        }
 
-       node = kzalloc(sizeof(*node), GFP_KERNEL);
+       node = kzalloc_preempt_disabled(sizeof(*node));
        if (node == NULL)
                return NULL;
        binder_stats_created(BINDER_STAT_NODE);
@@ -1052,7 +1115,7 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
                else
                        return ref;
        }
-       new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+       new_ref = kzalloc_preempt_disabled(sizeof(*ref));
        if (new_ref == NULL)
                return NULL;
        binder_stats_created(BINDER_STAT_REF);
@@ -1453,14 +1516,14 @@ static void binder_transaction(struct binder_proc *proc,
        e->to_proc = target_proc->pid;
 
        /* TODO: reuse incoming transaction for reply */
-       t = kzalloc(sizeof(*t), GFP_KERNEL);
+       t = kzalloc_preempt_disabled(sizeof(*t));
        if (t == NULL) {
                return_error = BR_FAILED_REPLY;
                goto err_alloc_t_failed;
        }
        binder_stats_created(BINDER_STAT_TRANSACTION);
 
-       tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
+       tcomplete = kzalloc_preempt_disabled(sizeof(*tcomplete));
        if (tcomplete == NULL) {
                return_error = BR_FAILED_REPLY;
                goto err_alloc_tcomplete_failed;
@@ -1517,14 +1580,14 @@ static void binder_transaction(struct binder_proc *proc,
        offp = (binder_size_t *)(t->buffer->data +
                                 ALIGN(tr->data_size, sizeof(void *)));
 
-       if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
+       if (copy_from_user_preempt_disabled(t->buffer->data, (const void __user *)(uintptr_t)
                           tr->data.ptr.buffer, tr->data_size)) {
                binder_user_error("%d:%d got transaction with invalid data ptr\n",
                                proc->pid, thread->pid);
                return_error = BR_FAILED_REPLY;
                goto err_copy_data_failed;
        }
-       if (copy_from_user(offp, (const void __user *)(uintptr_t)
+       if (copy_from_user_preempt_disabled(offp, (const void __user *)(uintptr_t)
                           tr->data.ptr.offsets, tr->offsets_size)) {
                binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
                                proc->pid, thread->pid);
@@ -1729,9 +1792,7 @@ static void binder_transaction(struct binder_proc *proc,
        list_add_tail(&tcomplete->entry, &thread->todo);
        if (target_wait) {
                if (reply || !(t->flags & TF_ONE_WAY)) {
-                       preempt_disable();
                        wake_up_interruptible_sync(target_wait);
-                       sched_preempt_enable_no_resched();
                } else {
                        wake_up_interruptible(target_wait);
                }
@@ -1794,7 +1855,7 @@ int binder_thread_write(struct binder_proc *proc,
        void __user *end = buffer + size;
 
        while (ptr < end && thread->return_error == BR_OK) {
-               if (get_user(cmd, (uint32_t __user *)ptr))
+               if (get_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
                        return -EFAULT;
                ptr += sizeof(uint32_t);
                trace_binder_command(cmd);
@@ -1812,7 +1873,7 @@ int binder_thread_write(struct binder_proc *proc,
                        struct binder_ref *ref;
                        const char *debug_string;
 
-                       if (get_user(target, (uint32_t __user *)ptr))
+                       if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
                                return -EFAULT;
                        ptr += sizeof(uint32_t);
                        if (target == 0 && binder_context_mgr_node &&
@@ -1871,10 +1932,10 @@ int binder_thread_write(struct binder_proc *proc,
                        binder_uintptr_t cookie;
                        struct binder_node *node;
 
-                       if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
+                       if (get_user_preempt_disabled(node_ptr, (binder_uintptr_t __user *)ptr))
                                return -EFAULT;
                        ptr += sizeof(binder_uintptr_t);
-                       if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+                       if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
                                return -EFAULT;
                        ptr += sizeof(binder_uintptr_t);
                        node = binder_get_node(proc, node_ptr);
@@ -1932,7 +1993,7 @@ int binder_thread_write(struct binder_proc *proc,
                        binder_uintptr_t data_ptr;
                        struct binder_buffer *buffer;
 
-                       if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
+                       if (get_user_preempt_disabled(data_ptr, (binder_uintptr_t __user *)ptr))
                                return -EFAULT;
                        ptr += sizeof(binder_uintptr_t);
 
@@ -1974,7 +2035,7 @@ int binder_thread_write(struct binder_proc *proc,
                case BC_REPLY: {
                        struct binder_transaction_data tr;
 
-                       if (copy_from_user(&tr, ptr, sizeof(tr)))
+                       if (copy_from_user_preempt_disabled(&tr, ptr, sizeof(tr)))
                                return -EFAULT;
                        ptr += sizeof(tr);
                        binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
@@ -2024,10 +2085,10 @@ int binder_thread_write(struct binder_proc *proc,
                        struct binder_ref *ref;
                        struct binder_ref_death *death;
 
-                       if (get_user(target, (uint32_t __user *)ptr))
+                       if (get_user_preempt_disabled(target, (uint32_t __user *)ptr))
                                return -EFAULT;
                        ptr += sizeof(uint32_t);
-                       if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+                       if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
                                return -EFAULT;
                        ptr += sizeof(binder_uintptr_t);
                        ref = binder_get_ref(proc, target, false);
@@ -2056,7 +2117,7 @@ int binder_thread_write(struct binder_proc *proc,
                                                proc->pid, thread->pid);
                                        break;
                                }
-                               death = kzalloc(sizeof(*death), GFP_KERNEL);
+                               death = kzalloc_preempt_disabled(sizeof(*death));
                                if (death == NULL) {
                                        thread->return_error = BR_ERROR;
                                        binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
@@ -2111,7 +2172,7 @@ int binder_thread_write(struct binder_proc *proc,
                        binder_uintptr_t cookie;
                        struct binder_ref_death *death = NULL;
 
-                       if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+                       if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
                                return -EFAULT;
 
                        ptr += sizeof(void *);
@@ -2191,7 +2252,7 @@ static int binder_thread_read(struct binder_proc *proc,
        int wait_for_proc_work;
 
        if (*consumed == 0) {
-               if (put_user(BR_NOOP, (uint32_t __user *)ptr))
+               if (put_user_preempt_disabled(BR_NOOP, (uint32_t __user *)ptr))
                        return -EFAULT;
                ptr += sizeof(uint32_t);
        }
@@ -2202,7 +2263,7 @@ retry:
 
        if (thread->return_error != BR_OK && ptr < end) {
                if (thread->return_error2 != BR_OK) {
-                       if (put_user(thread->return_error2, (uint32_t __user *)ptr))
+                       if (put_user_preempt_disabled(thread->return_error2, (uint32_t __user *)ptr))
                                return -EFAULT;
                        ptr += sizeof(uint32_t);
                        binder_stat_br(proc, thread, thread->return_error2);
@@ -2210,7 +2271,7 @@ retry:
                                goto done;
                        thread->return_error2 = BR_OK;
                }
-               if (put_user(thread->return_error, (uint32_t __user *)ptr))
+               if (put_user_preempt_disabled(thread->return_error, (uint32_t __user *)ptr))
                        return -EFAULT;
                ptr += sizeof(uint32_t);
                binder_stat_br(proc, thread, thread->return_error);
@@ -2288,7 +2349,7 @@ retry:
                } break;
                case BINDER_WORK_TRANSACTION_COMPLETE: {
                        cmd = BR_TRANSACTION_COMPLETE;
-                       if (put_user(cmd, (uint32_t __user *)ptr))
+                       if (put_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
                                return -EFAULT;
                        ptr += sizeof(uint32_t);
 
@@ -2330,14 +2391,14 @@ retry:
                                node->has_weak_ref = 0;
                        }
                        if (cmd != BR_NOOP) {
-                               if (put_user(cmd, (uint32_t __user *)ptr))
+                               if (put_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
                                        return -EFAULT;
                                ptr += sizeof(uint32_t);
-                               if (put_user(node->ptr,
+                               if (put_user_preempt_disabled(node->ptr,
                                             (binder_uintptr_t __user *)ptr))
                                        return -EFAULT;
                                ptr += sizeof(binder_uintptr_t);
-                               if (put_user(node->cookie,
+                               if (put_user_preempt_disabled(node->cookie,
                                             (binder_uintptr_t __user *)ptr))
                                        return -EFAULT;
                                ptr += sizeof(binder_uintptr_t);
@@ -2381,10 +2442,10 @@ retry:
                                cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
                        else
                                cmd = BR_DEAD_BINDER;
-                       if (put_user(cmd, (uint32_t __user *)ptr))
+                       if (put_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
                                return -EFAULT;
                        ptr += sizeof(uint32_t);
-                       if (put_user(death->cookie,
+                       if (put_user_preempt_disabled(death->cookie,
                                     (binder_uintptr_t __user *)ptr))
                                return -EFAULT;
                        ptr += sizeof(binder_uintptr_t);
@@ -2452,10 +2513,10 @@ retry:
                                        ALIGN(t->buffer->data_size,
                                            sizeof(void *));
 
-               if (put_user(cmd, (uint32_t __user *)ptr))
+               if (put_user_preempt_disabled(cmd, (uint32_t __user *)ptr))
                        return -EFAULT;
                ptr += sizeof(uint32_t);
-               if (copy_to_user(ptr, &tr, sizeof(tr)))
+               if (copy_to_user_preempt_disabled(ptr, &tr, sizeof(tr)))
                        return -EFAULT;
                ptr += sizeof(tr);
 
@@ -2497,7 +2558,7 @@ done:
                binder_debug(BINDER_DEBUG_THREADS,
                             "%d:%d BR_SPAWN_LOOPER\n",
                             proc->pid, thread->pid);
-               if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
+               if (put_user_preempt_disabled(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
                        return -EFAULT;
                binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
        }
@@ -2572,7 +2633,7 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
                        break;
        }
        if (*p == NULL) {
-               thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+               thread = kzalloc_preempt_disabled(sizeof(*thread));
                if (thread == NULL)
                        return NULL;
                binder_stats_created(BINDER_STAT_THREAD);
@@ -2676,7 +2737,7 @@ static int binder_ioctl_write_read(struct file *filp,
                ret = -EINVAL;
                goto out;
        }
-       if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
+       if (copy_from_user_preempt_disabled(&bwr, ubuf, sizeof(bwr))) {
                ret = -EFAULT;
                goto out;
        }
@@ -2694,7 +2755,7 @@ static int binder_ioctl_write_read(struct file *filp,
                trace_binder_write_done(ret);
                if (ret < 0) {
                        bwr.read_consumed = 0;
-                       if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+                       if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
                                ret = -EFAULT;
                        goto out;
                }
@@ -2708,7 +2769,7 @@ static int binder_ioctl_write_read(struct file *filp,
                if (!list_empty(&proc->todo))
                        wake_up_interruptible(&proc->wait);
                if (ret < 0) {
-                       if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+                       if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr)))
                                ret = -EFAULT;
                        goto out;
                }
@@ -2718,7 +2779,7 @@ static int binder_ioctl_write_read(struct file *filp,
                     proc->pid, thread->pid,
                     (u64)bwr.write_consumed, (u64)bwr.write_size,
                     (u64)bwr.read_consumed, (u64)bwr.read_size);
-       if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
+       if (copy_to_user_preempt_disabled(ubuf, &bwr, sizeof(bwr))) {
                ret = -EFAULT;
                goto out;
        }
@@ -2796,7 +2857,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                        goto err;
                break;
        case BINDER_SET_MAX_THREADS:
-               if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
+               if (copy_from_user_preempt_disabled(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
                        ret = -EINVAL;
                        goto err;
                }
@@ -2819,7 +2880,8 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                        ret = -EINVAL;
                        goto err;
                }
-               if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
+
+               if (put_user_preempt_disabled(BINDER_CURRENT_PROTOCOL_VERSION,
                             &ver->protocol_version)) {
                        ret = -EINVAL;
                        goto err;
@@ -2882,6 +2944,7 @@ static struct vm_operations_struct binder_vm_ops = {
 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 {
        int ret;
+
        struct vm_struct *area;
        struct binder_proc *proc = filp->private_data;
        const char *failure_string;
@@ -2942,7 +3005,11 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
        vma->vm_ops = &binder_vm_ops;
        vma->vm_private_data = proc;
 
-       if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
+       /* binder_update_page_range assumes preemption is disabled */
+       preempt_disable();
+       ret = binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma);
+       preempt_enable_no_resched();
+       if (ret) {
                ret = -ENOMEM;
                failure_string = "alloc small buf";
                goto err_alloc_small_buf_failed;
@@ -3212,8 +3279,12 @@ static void binder_deferred_func(struct work_struct *work)
        int defer;
 
        do {
-               binder_lock(__func__);
+               trace_binder_lock(__func__);
+               mutex_lock(&binder_main_lock);
+               trace_binder_locked(__func__);
+
                mutex_lock(&binder_deferred_lock);
+               preempt_disable();
                if (!hlist_empty(&binder_deferred_list)) {
                        proc = hlist_entry(binder_deferred_list.first,
                                        struct binder_proc, deferred_work_node);
@@ -3239,7 +3310,9 @@ static void binder_deferred_func(struct work_struct *work)
                if (defer & BINDER_DEFERRED_RELEASE)
                        binder_deferred_release(proc); /* frees proc */
 
-               binder_unlock(__func__);
+               trace_binder_unlock(__func__);
+               mutex_unlock(&binder_main_lock);
+               preempt_enable_no_resched();
                if (files)
                        put_files_struct(files);
        } while (proc);