FROMLIST: binder: protect against two threads freeing buffer
author Todd Kjos <tkjos@google.com>
Fri, 21 Apr 2017 21:32:11 +0000 (14:32 -0700)
committer Todd Kjos <tkjos@google.com>
Thu, 13 Jul 2017 15:34:20 +0000 (08:34 -0700)
(from https://patchwork.kernel.org/patch/9817815/)

Add protection against malicious user code freeing the same buffer
from two threads at the same time, which could cause a crash. This
cannot happen under normal use.
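
To illustrate the idea, a minimal userspace sketch (not part of the
patch; prepare_to_free() and struct buffer are hypothetical stand-ins
for binder_alloc_prepare_to_free() and struct binder_buffer): the
first thread to look up a buffer claims it under a mutex by setting
free_in_progress, so a racing second free fails the lookup instead
of freeing the same buffer twice.

  /* Minimal pthread analogue of the claim-under-lock guard. */
  #include <pthread.h>
  #include <stdio.h>

  struct buffer {
          unsigned free_in_progress:1;
  };

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  static struct buffer buf;    /* stands in for the rb-tree lookup result */

  /* Return the buffer if this thread won the claim, or NULL if a
   * free is already in progress (the double-free being rejected).
   */
  static struct buffer *prepare_to_free(struct buffer *b)
  {
          struct buffer *ret = NULL;

          pthread_mutex_lock(&lock);
          if (!b->free_in_progress) {
                  b->free_in_progress = 1;
                  ret = b;
          }
          pthread_mutex_unlock(&lock);
          return ret;
  }

  static void *free_thread(void *name)
  {
          if (prepare_to_free(&buf))
                  printf("%s: claimed buffer, freeing once\n", (char *)name);
          else
                  printf("%s: buffer freed twice, rejected\n", (char *)name);
          return NULL;
  }

  int main(void)
  {
          pthread_t a, b;

          pthread_create(&a, NULL, free_thread, "A");
          pthread_create(&b, NULL, free_thread, "B");
          pthread_join(a, NULL);
          pthread_join(b, NULL);
          return 0;    /* exactly one thread ever claims the buffer */
  }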

Change-Id: I9461229401aaa7f3b5b2477960f79d4d1bd17fee
Signed-off-by: Todd Kjos <tkjos@google.com>
drivers/android/binder.c
drivers/android/binder_alloc.c
drivers/android/binder_alloc.h [new file with mode: 0644]

diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index e872bf79fd0625e55ebd521316685ade3f5017b4..48d2119a25dd8ea3be81cafe86b0af6e3bc0d2da 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2024,8 +2024,8 @@ static int binder_thread_write(struct binder_proc *proc,
                                return -EFAULT;
                        ptr += sizeof(binder_uintptr_t);
 
-                       buffer = binder_alloc_buffer_lookup(&proc->alloc,
-                                                           data_ptr);
+                       buffer = binder_alloc_prepare_to_free(&proc->alloc,
+                                                             data_ptr);
                        if (buffer == NULL) {
                                binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
                                        proc->pid, thread->pid, (u64)data_ptr);
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index f6aca5f1ae7d8e2cc25f26ec243b30fe26242e12..af86433accea6f919feff0b4454ed66b9c8be8f5 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -116,7 +116,7 @@ static void binder_insert_allocated_buffer_locked(
        rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
 }
 
-static struct binder_buffer *binder_alloc_buffer_lookup_locked(
+static struct binder_buffer *binder_alloc_prepare_to_free_locked(
                struct binder_alloc *alloc,
                uintptr_t user_ptr)
 {
@@ -135,8 +135,19 @@ static struct binder_buffer *binder_alloc_buffer_lookup_locked(
                        n = n->rb_left;
                else if (kern_ptr > buffer)
                        n = n->rb_right;
-               else
+               else {
+                       /*
+                        * Guard against user threads attempting to
+                        * free the buffer twice
+                        */
+                       if (buffer->free_in_progress) {
+                               pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
+                                      alloc->pid, current->pid, (u64)user_ptr);
+                               return NULL;
+                       }
+                       buffer->free_in_progress = 1;
                        return buffer;
+               }
        }
        return NULL;
 }
@@ -152,13 +163,13 @@ static struct binder_buffer *binder_alloc_buffer_lookup_locked(
  *
  * Return:     Pointer to buffer or NULL
  */
-struct binder_buffer *binder_alloc_buffer_lookup(struct binder_alloc *alloc,
-                                                uintptr_t user_ptr)
+struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
+                                                  uintptr_t user_ptr)
 {
        struct binder_buffer *buffer;
 
        mutex_lock(&alloc->mutex);
-       buffer = binder_alloc_buffer_lookup_locked(alloc, user_ptr);
+       buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
        mutex_unlock(&alloc->mutex);
        return buffer;
 }
@@ -358,6 +369,7 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
 
        rb_erase(best_fit, &alloc->free_buffers);
        buffer->free = 0;
+       buffer->free_in_progress = 0;
        binder_insert_allocated_buffer_locked(alloc, buffer);
        if (buffer_size != size) {
                struct binder_buffer *new_buffer = (void *)buffer->data + size;
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
new file mode 100644
index 0000000..088e4ff
--- /dev/null
+++ b/drivers/android/binder_alloc.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_BINDER_ALLOC_H
+#define _LINUX_BINDER_ALLOC_H
+
+#include <linux/rbtree.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/rtmutex.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+
+struct binder_transaction;
+
+/**
+ * struct binder_buffer - buffer used for binder transactions
+ * @entry:              entry in the alloc->buffers list
+ * @rb_node:            node for allocated_buffers/free_buffers rb trees
+ * @free:               true if buffer is free
+ * @allow_user_free:    true if user space may free this buffer
+ * @async_transaction:  true if buffer is in use for an async transaction
+ * @free_in_progress:   true once a free of this buffer has started;
+ *                      used to reject a second free of the same buffer
+ * @debug_id:           unique ID for debugging
+ * @transaction:        pointer to associated struct binder_transaction
+ * @target_node:        struct binder_node associated with this buffer
+ * @data_size:          size of @transaction data
+ * @offsets_size:       size of the array of object offsets
+ * @extra_buffers_size: size of space for other objects (like sg lists)
+ * @data:               start of the buffer's data area
+ *
+ * Bookkeeping structure for binder transaction buffers
+ */
+struct binder_buffer {
+       struct list_head entry; /* free and allocated entries by address */
+       struct rb_node rb_node; /* free entry by size or allocated entry */
+                               /* by address */
+       unsigned free:1;
+       unsigned allow_user_free:1;
+       unsigned async_transaction:1;
+       unsigned free_in_progress:1;
+       unsigned debug_id:28;
+
+       struct binder_transaction *transaction;
+
+       struct binder_node *target_node;
+       size_t data_size;
+       size_t offsets_size;
+       size_t extra_buffers_size;
+       uint8_t data[0];
+};
+
+/**
+ * struct binder_alloc - per-binder proc state for binder allocator
+ * @mutex:              protects binder_alloc fields
+ * @vma:                vm_area_struct passed to mmap_handler
+ *                      (invariant after mmap)
+ * @tsk:                task_struct of the task that called init for
+ *                      this proc (invariant after init)
+ * @vma_vm_mm:          copy of vma->vm_mm (invariant after mmap)
+ * @buffer:             base of per-proc address space mapped via mmap
+ * @user_buffer_offset: offset between user and kernel VAs for buffer
+ * @buffers:            list of all buffers for this proc
+ * @free_buffers:       rb tree of buffers available for allocation
+ *                      sorted by size
+ * @allocated_buffers:  rb tree of allocated buffers sorted by address
+ * @free_async_space:   VA space available for async buffers. This is
+ *                      initialized at mmap time to 1/2 the full VA space
+ * @pages:              array of struct page pointers, one for each
+ *                      page of mmap'd space
+ * @buffer_size:        size of address space specified via mmap
+ * @pid:                pid for associated binder_proc (invariant after init)
+ *
+ * Bookkeeping structure for per-proc address space management for binder
+ * buffers. It is normally initialized during binder_init() and binder_mmap()
+ * calls. The address space is used for both user-visible buffers and for
+ * struct binder_buffer objects used to track the user buffers
+ */
+struct binder_alloc {
+       struct mutex mutex;
+       struct task_struct *tsk;
+       struct vm_area_struct *vma;
+       struct mm_struct *vma_vm_mm;
+       void *buffer;
+       ptrdiff_t user_buffer_offset;
+       struct list_head buffers;
+       struct rb_root free_buffers;
+       struct rb_root allocated_buffers;
+       size_t free_async_space;
+       struct page **pages;
+       size_t buffer_size;
+       uint32_t buffer_free;
+       int pid;
+};
+
+extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+                                                 size_t data_size,
+                                                 size_t offsets_size,
+                                                 size_t extra_buffers_size,
+                                                 int is_async);
+extern void binder_alloc_init(struct binder_alloc *alloc);
+extern void binder_alloc_vma_close(struct binder_alloc *alloc);
+extern struct binder_buffer *
+binder_alloc_prepare_to_free(struct binder_alloc *alloc,
+                            uintptr_t user_ptr);
+extern void binder_alloc_free_buf(struct binder_alloc *alloc,
+                                 struct binder_buffer *buffer);
+extern int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+                                    struct vm_area_struct *vma);
+extern void binder_alloc_deferred_release(struct binder_alloc *alloc);
+extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
+extern void binder_alloc_print_allocated(struct seq_file *m,
+                                        struct binder_alloc *alloc);
+
+/**
+ * binder_alloc_get_free_async_space() - get free space available for async
+ * @alloc:     binder_alloc for this proc
+ *
+ * Return:     the bytes remaining in the address-space for async transactions
+ */
+static inline size_t
+binder_alloc_get_free_async_space(struct binder_alloc *alloc)
+{
+       size_t free_async_space;
+
+       mutex_lock(&alloc->mutex);
+       free_async_space = alloc->free_async_space;
+       mutex_unlock(&alloc->mutex);
+       return free_async_space;
+}
+
+/**
+ * binder_alloc_get_user_buffer_offset() - get offset between kernel/user addrs
+ * @alloc:     binder_alloc for this proc
+ *
+ * Return:     the offset between kernel and user-space addresses to use for
+ * virtual address conversion
+ */
+static inline ptrdiff_t
+binder_alloc_get_user_buffer_offset(struct binder_alloc *alloc)
+{
+       /*
+        * user_buffer_offset is constant if vma is set and
+        * undefined if vma is not set. It is possible to
+        * get here with !alloc->vma if the target process
+        * is dying while a transaction is being initiated.
+        * Returning the old value is ok in this case and
+        * the transaction will fail.
+        */
+       return alloc->user_buffer_offset;
+}
+
+#endif /* _LINUX_BINDER_ALLOC_H */
+
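
For context, a hedged sketch (an assumption about the surrounding
code, not quoted from the patch) of the address conversion that
binder_alloc_prepare_to_free_locked() performs before walking the
allocated_buffers rb tree. User space names a buffer by the
user-space address of its data area; subtracting user_buffer_offset
maps that back to the kernel's view, and stepping back over the
header yields the containing struct binder_buffer. The helper name
user_ptr_to_buffer() is hypothetical:

  /* Hypothetical helper, for illustration only; the real lookup in
   * binder_alloc.c open-codes this and then confirms the result
   * against the allocated_buffers rb tree before trusting it.
   */
  static inline struct binder_buffer *
  user_ptr_to_buffer(struct binder_alloc *alloc, uintptr_t user_ptr)
  {
          /* kernel-side address of the buffer's data area */
          void *kern_data = (void *)(user_ptr - alloc->user_buffer_offset);

          /* back up over the struct binder_buffer header */
          return (struct binder_buffer *)
                  (kern_data - offsetof(struct binder_buffer, data));
  }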