Revert "FROMLIST: android: binder: Move buffer out of area shared with user space"
author		Danny Wood <danwood76@gmail.com>
		Thu, 31 Oct 2019 14:35:27 +0000 (14:35 +0000)
committer	Danny Wood <danwood76@gmail.com>
		Fri, 8 Nov 2019 12:03:13 +0000 (12:03 +0000)
The reverted commit causes the Samsung a5xelte fingerprint blobs to stop working.

This reverts commit 35852b611c5af888e0ac979391099fe2035a06be.

Change-Id: I4c9e3c551deb98b793cb6a7de9ef2a14f3a46067
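
Background for the hunks below: the reverted patch moved each struct binder_buffer header out of the mmap'ed region shared with user space into separately kzalloc'ed kernel memory, tracking the payload through a "void *data" pointer. This revert restores the original layout, in which the header sits inline at the start of its allocation and the payload follows it via a zero-length array. A rough sketch of the restored layout (field subset taken from the binder_alloc.h hunk; the elided fields are free/transaction bookkeeping):

	struct binder_buffer {
		struct list_head entry;   /* buffers ordered by address in alloc->buffers */
		struct rb_node rb_node;   /* free-by-size or allocated-by-address rbtree linkage */
		/* ... free/async/transaction bookkeeping fields ... */
		size_t data_size;
		size_t offsets_size;
		size_t extra_buffers_size;
		uint8_t data[0];          /* payload starts immediately after the header */
	};

Because header and payload share one mapping again, a kernel pointer and its user-space counterpart differ by the fixed alloc->user_buffer_offset, which is why the restored binder_alloc_prepare_to_free_locked() can recover the header from a user pointer by subtracting that offset and offsetof(struct binder_buffer, data).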

drivers/android/binder_alloc.c
drivers/android/binder_alloc.h
drivers/android/binder_alloc_selftest.c

drivers/android/binder_alloc.c
index a832bac20df70da3b6187617faa06884319b02b1..6a94e4af96e14ea6c94b5c7a3e6fa3daf7f5320b 100644
@@ -62,9 +62,9 @@ static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
                                       struct binder_buffer *buffer)
 {
        if (list_is_last(&buffer->entry, &alloc->buffers))
-               return (u8 *)alloc->buffer +
-                       alloc->buffer_size - (u8 *)buffer->data;
-       return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
+               return alloc->buffer +
+                      alloc->buffer_size - (void *)buffer->data;
+       return (size_t)binder_buffer_next(buffer) - (size_t)buffer->data;
 }
 
 static void binder_insert_free_buffer(struct binder_alloc *alloc,
@@ -114,9 +114,9 @@ static void binder_insert_allocated_buffer_locked(
                buffer = rb_entry(parent, struct binder_buffer, rb_node);
                BUG_ON(buffer->free);
 
-               if (new_buffer->data < buffer->data)
+               if (new_buffer < buffer)
                        p = &parent->rb_left;
-               else if (new_buffer->data > buffer->data)
+               else if (new_buffer > buffer)
                        p = &parent->rb_right;
                else
                        BUG();
@@ -131,17 +131,18 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
 {
        struct rb_node *n = alloc->allocated_buffers.rb_node;
        struct binder_buffer *buffer;
-       void *kern_ptr;
+       struct binder_buffer *kern_ptr;
 
-       kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
+       kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
+               - offsetof(struct binder_buffer, data));
 
        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                BUG_ON(buffer->free);
 
-               if (kern_ptr < buffer->data)
+               if (kern_ptr < buffer)
                        n = n->rb_left;
-               else if (kern_ptr > buffer->data)
+               else if (kern_ptr > buffer)
                        n = n->rb_right;
                else {
                        /*
@@ -327,9 +328,6 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
                return ERR_PTR(-ENOSPC);
        }
 
-       /* Pad 0-size buffers so they get assigned unique addresses */
-       size = max(size, sizeof(void *));
-
        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                BUG_ON(!buffer->free);
@@ -389,9 +387,14 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
 
        has_page_addr =
                (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
-       WARN_ON(n && buffer_size != size);
+       if (n == NULL) {
+               if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
+                       buffer_size = size; /* no room for other buffers */
+               else
+                       buffer_size = size + sizeof(struct binder_buffer);
+       }
        end_page_addr =
-               (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+               (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
        if (end_page_addr > has_page_addr)
                end_page_addr = has_page_addr;
        ret = binder_update_page_range(alloc, 1,
@@ -399,25 +402,17 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
        if (ret)
                return ERR_PTR(ret);
 
+       rb_erase(best_fit, &alloc->free_buffers);
+       buffer->free = 0;
+       binder_insert_allocated_buffer_locked(alloc, buffer);
        if (buffer_size != size) {
-               struct binder_buffer *new_buffer;
+               struct binder_buffer *new_buffer = (void *)buffer->data + size;
 
-               new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
-               if (!new_buffer) {
-                       pr_err("%s: %d failed to alloc new buffer struct\n",
-                              __func__, alloc->pid);
-                       goto err_alloc_buf_struct_failed;
-               }
-               new_buffer->data = (u8 *)buffer->data + size;
                list_add(&new_buffer->entry, &buffer->entry);
                new_buffer->free = 1;
                binder_insert_free_buffer(alloc, new_buffer);
        }
 
-       rb_erase(best_fit, &alloc->free_buffers);
-       buffer->free = 0;
-       buffer->allow_user_free = 0;
-       binder_insert_allocated_buffer_locked(alloc, buffer);
        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                     "%d: binder_alloc_buf size %zd got %pK\n",
                      alloc->pid, size, buffer);
@@ -432,12 +427,6 @@ struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
                              alloc->pid, size, alloc->free_async_space);
        }
        return buffer;
-
-err_alloc_buf_struct_failed:
-       binder_update_page_range(alloc, 0,
-                                (void *)PAGE_ALIGN((uintptr_t)buffer->data),
-                                end_page_addr, NULL);
-       return ERR_PTR(-ENOMEM);
 }
 
 /**
@@ -472,59 +461,56 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 
 static void *buffer_start_page(struct binder_buffer *buffer)
 {
-       return (void *)((uintptr_t)buffer->data & PAGE_MASK);
+       return (void *)((uintptr_t)buffer & PAGE_MASK);
 }
 
-static void *prev_buffer_end_page(struct binder_buffer *buffer)
+static void *buffer_end_page(struct binder_buffer *buffer)
 {
-       return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
+       return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
 }
 
 static void binder_delete_free_buffer(struct binder_alloc *alloc,
                                      struct binder_buffer *buffer)
 {
        struct binder_buffer *prev, *next = NULL;
-       bool to_free = true;
+       int free_page_end = 1;
+       int free_page_start = 1;
+
        BUG_ON(alloc->buffers.next == &buffer->entry);
        prev = binder_buffer_prev(buffer);
        BUG_ON(!prev->free);
-       if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
-               to_free = false;
+       if (buffer_end_page(prev) == buffer_start_page(buffer)) {
+               free_page_start = 0;
+               if (buffer_end_page(prev) == buffer_end_page(buffer))
+                       free_page_end = 0;
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                                  "%d: merge free, buffer %pK share page with %pK\n",
-                                  alloc->pid, buffer->data, prev->data);
+                            "%d: merge free, buffer %pK share page with %pK\n",
+                             alloc->pid, buffer, prev);
        }
 
        if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                next = binder_buffer_next(buffer);
-               if (buffer_start_page(next) == buffer_start_page(buffer)) {
-                       to_free = false;
+               if (buffer_start_page(next) == buffer_end_page(buffer)) {
+                       free_page_end = 0;
+                       if (buffer_start_page(next) ==
+                           buffer_start_page(buffer))
+                               free_page_start = 0;
                        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                                          "%d: merge free, buffer %pK share page with %pK\n",
-                                          alloc->pid,
-                                          buffer->data,
-                                          next->data);
+                                    "%d: merge free, buffer %pK share page with %pK\n",
+                                     alloc->pid, buffer, prev);
                }
        }
-
-       if (PAGE_ALIGNED(buffer->data)) {
-               binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                                  "%d: merge free, buffer start %pK is page aligned\n",
-                                  alloc->pid, buffer->data);
-               to_free = false;
-       }
-
-       if (to_free) {
+       list_del(&buffer->entry);
+       if (free_page_start || free_page_end) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                                  "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
-                                  alloc->pid, buffer->data,
-                                  prev->data, next ? next->data : NULL);
-               binder_update_page_range(alloc, 0, buffer_start_page(buffer),
-                                        buffer_start_page(buffer) + PAGE_SIZE,
-                                        NULL);
+                            "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
+                            alloc->pid, buffer, free_page_start ? "" : " end",
+                            free_page_end ? "" : " start", prev, next);
+               binder_update_page_range(alloc, 0, free_page_start ?
+                       buffer_start_page(buffer) : buffer_end_page(buffer),
+                       (free_page_end ? buffer_end_page(buffer) :
+                       buffer_start_page(buffer)) + PAGE_SIZE, NULL);
        }
-       list_del(&buffer->entry);
-       kfree(buffer);
 }
 
 static void binder_free_buf_locked(struct binder_alloc *alloc,
@@ -545,8 +531,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
        BUG_ON(buffer->free);
        BUG_ON(size > buffer_size);
        BUG_ON(buffer->transaction != NULL);
-       BUG_ON(buffer->data < alloc->buffer);
-       BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
+       BUG_ON((void *)buffer < alloc->buffer);
+       BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);
 
        if (buffer->async_transaction) {
                alloc->free_async_space += size + sizeof(struct binder_buffer);
@@ -658,14 +644,14 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
        }
        alloc->buffer_size = vma->vm_end - vma->vm_start;
 
-       buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
-       if (!buffer) {
+       if (binder_update_page_range(alloc, 1, alloc->buffer,
+                                    alloc->buffer + PAGE_SIZE, vma)) {
                ret = -ENOMEM;
-               failure_string = "alloc buffer struct";
-               goto err_alloc_buf_struct_failed;
+               failure_string = "alloc small buf";
+               goto err_alloc_small_buf_failed;
        }
-
-       buffer->data = alloc->buffer;
+       buffer = alloc->buffer;
+       INIT_LIST_HEAD(&alloc->buffers);
        list_add(&buffer->entry, &alloc->buffers);
        buffer->free = 1;
        binder_insert_free_buffer(alloc, buffer);
@@ -676,7 +662,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 
        return 0;
 
-err_alloc_buf_struct_failed:
+err_alloc_small_buf_failed:
        kfree(alloc->pages);
        alloc->pages = NULL;
 err_alloc_pages_failed:
@@ -696,13 +682,14 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 {
        struct rb_node *n;
        int buffers, page_count;
-       struct binder_buffer *buffer;
 
        BUG_ON(alloc->vma);
 
        buffers = 0;
        mutex_lock(&alloc->mutex);
        while ((n = rb_first(&alloc->allocated_buffers))) {
+               struct binder_buffer *buffer;
+
                buffer = rb_entry(n, struct binder_buffer, rb_node);
 
                /* Transaction should already have been freed */
@@ -712,16 +699,6 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
                buffers++;
        }
 
-       while (!list_empty(&alloc->buffers)) {
-               buffer = list_first_entry(&alloc->buffers,
-                                         struct binder_buffer, entry);
-               WARN_ON(!buffer->free);
-
-               list_del(&buffer->entry);
-               WARN_ON_ONCE(!list_empty(&alloc->buffers));
-               kfree(buffer);
-       }
-
        page_count = 0;
        if (alloc->pages) {
                int i;
@@ -825,6 +802,5 @@ void binder_alloc_init(struct binder_alloc *alloc)
        alloc->tsk = current->group_leader;
        alloc->pid = current->group_leader->pid;
        mutex_init(&alloc->mutex);
-       INIT_LIST_HEAD(&alloc->buffers);
 }
 
drivers/android/binder_alloc.h
index 395d9b56bbf1721c2ab6b64b4ac1eebb836ad5af..6f1e82d8820518a7dfd9875c405e36ddcb2d4dd2 100644
@@ -56,7 +56,7 @@ struct binder_buffer {
        size_t data_size;
        size_t offsets_size;
        size_t extra_buffers_size;
-       void *data;
+       uint8_t data[0];
 };
 
 /**
drivers/android/binder_alloc_selftest.c
index 0bf72079a9da89a3dc127fec17edfe3aa3850513..cc00ab6ee29d228584f7070f6390a5c52a6eff58 100644
@@ -105,9 +105,8 @@ static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
        void *page_addr, *end;
        int page_index;
 
-       end = (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
-       page_addr = buffer->data;
-       for (; page_addr < end; page_addr += PAGE_SIZE) {
+       end = (void *)PAGE_ALIGN((uintptr_t)buffer + size);
+       for (page_addr = buffer; page_addr < end; page_addr += PAGE_SIZE) {
                page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
                if (!alloc->pages[page_index]) {
                        pr_err("incorrect alloc state at page index %d\n",
@@ -210,7 +209,8 @@ static void binder_selftest_alloc_size(struct binder_alloc *alloc,
         * Only BUFFER_NUM - 1 buffer sizes are adjustable since
         * we need one giant buffer before getting to the last page.
         */
-       back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
+       back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1]
+               - sizeof(struct binder_buffer) * BUFFER_NUM;
        binder_selftest_free_seq(alloc, front_sizes, seq, 0);
        binder_selftest_free_seq(alloc, back_sizes, seq, 0);
 }
@@ -228,7 +228,8 @@ static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
        prev = index == 0 ? 0 : end_offset[index - 1];
        end = prev;
 
-       BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);
+       BUILD_BUG_ON((BUFFER_MIN_SIZE + sizeof(struct binder_buffer))
+                    * BUFFER_NUM >= PAGE_SIZE);
 
        for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
                if (align % 2)
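
A note on the size arithmetic this revert brings back in binder_alloc_buffer_size(): with headers inline, the usable size of a non-final buffer is simply the distance from its data[] member to the start of the next header. A self-contained illustration with a hypothetical header type and addresses (a sketch only, not part of the commit):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for the inline header; real binder_buffer has more fields. */
	struct hdr { size_t data_size; uint8_t data[]; };

	int main(void)
	{
		/* Two headers placed 64 bytes apart, as if carved from one mapping. */
		_Alignas(16) uint8_t area[128];
		struct hdr *a = (struct hdr *)area;
		struct hdr *b = (struct hdr *)(area + 64);

		/* Restored rule: size of a == address of next header - a->data. */
		size_t size_a = (size_t)((uint8_t *)b - a->data);
		printf("usable bytes in a: %zu\n", size_a); /* 64 - sizeof(struct hdr) */
		return 0;
	}

This is also why the allocator hunk reserves size + sizeof(struct binder_buffer) when splitting a free buffer: the remainder must hold its own inline header before it can be reinserted into the free tree.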