BINDER_DEFERRED_RELEASE = 0x04,
};
+/**
+ * struct binder_alloc - per-binder proc state for binder allocator
+ * @vma: vm_area_struct passed to mmap_handler
+ * (invariant after mmap)
+ * @vma_vm_mm: copy of vma->vm_mm (invariant after mmap)
+ * @buffer: base of per-proc address space mapped via mmap
+ * @user_buffer_offset: offset between user and kernel VAs for buffer
+ * @buffers: list of all buffers for this proc
+ * @free_buffers: rb tree of buffers available for allocation
+ * sorted by size
+ * @allocated_buffers: rb tree of allocated buffers sorted by address
+ * @free_async_space: VA space available for async buffers. This is
+ * initialized at mmap time to 1/2 the full VA space
+ * @pages: array of physical page addresses for each page of
+ * mmap'd space
+ * @buffer_size: size of address space (could be less than requested)
+ *
+ * Bookkeeping structure for per-proc address space management for binder
+ * buffers. It is normally initialized during binder_init() and binder_mmap()
+ * calls. The address space is used for both user-visible buffers and for
+ * struct binder_buffer objects used to track the user buffers.
+ */
+struct binder_alloc {
+ struct vm_area_struct *vma;
+ struct mm_struct *vma_vm_mm;
+ void *buffer;
+ ptrdiff_t user_buffer_offset;
+ struct list_head buffers;
+ struct rb_root free_buffers;
+ struct rb_root allocated_buffers;
+ size_t free_async_space;
+ struct page **pages;
+ size_t buffer_size;
+};
+
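For context (an editorial sketch, not part of the patch): @user_buffer_offset records the constant distance between the kernel mapping obtained from get_vm_area() and the userspace mapping established in binder_mmap(), so converting an address between the two views is a single addition or subtraction. The helpers below are hypothetical and only illustrate the convention the converted callers follow (e.g. binder_buffer_lookup() and the vm_insert_page() path); the names do not exist in the driver.

	/*
	 * Illustrative only: both mappings cover the same physical pages,
	 * so a kernel VA and its user counterpart differ by a constant
	 * offset fixed at mmap time:
	 *   user_buffer_offset = vma->vm_start - (uintptr_t)alloc->buffer
	 */
	static inline uintptr_t binder_alloc_kern_to_user(struct binder_alloc *alloc,
							  void *kern_addr)
	{
		/* user VA = kernel VA + user_buffer_offset */
		return (uintptr_t)kern_addr + alloc->user_buffer_offset;
	}

	static inline void *binder_alloc_user_to_kern(struct binder_alloc *alloc,
						      uintptr_t user_addr)
	{
		/* inverse direction, as done in binder_buffer_lookup() */
		return (void *)(user_addr - alloc->user_buffer_offset);
	}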
struct binder_proc {
struct hlist_node proc_node;
struct rb_root threads;
struct rb_root refs_by_desc;
struct rb_root refs_by_node;
int pid;
- struct vm_area_struct *vma;
- struct mm_struct *vma_vm_mm;
struct task_struct *tsk;
struct files_struct *files;
struct hlist_node deferred_work_node;
int deferred_work;
- void *buffer;
- ptrdiff_t user_buffer_offset;
- struct list_head buffers;
- struct rb_root free_buffers;
- struct rb_root allocated_buffers;
- size_t free_async_space;
-
- struct page **pages;
- size_t buffer_size;
- uint32_t buffer_free;
struct list_head todo;
wait_queue_head_t wait;
struct binder_stats stats;
int ready_threads;
long default_priority;
struct dentry *debugfs_entry;
+ struct binder_alloc alloc;
struct binder_context *context;
};
static size_t binder_buffer_size(struct binder_proc *proc,
struct binder_buffer *buffer)
{
- if (list_is_last(&buffer->entry, &proc->buffers))
- return proc->buffer + proc->buffer_size - (void *)buffer->data;
+ if (list_is_last(&buffer->entry, &proc->alloc.buffers))
+ return proc->alloc.buffer +
+ proc->alloc.buffer_size -
+ (void *)buffer->data;
return (size_t)list_entry(buffer->entry.next,
struct binder_buffer, entry) - (size_t)buffer->data;
}
static void binder_insert_free_buffer(struct binder_proc *proc,
struct binder_buffer *new_buffer)
{
- struct rb_node **p = &proc->free_buffers.rb_node;
+ struct rb_node **p = &proc->alloc.free_buffers.rb_node;
struct rb_node *parent = NULL;
struct binder_buffer *buffer;
size_t buffer_size;
p = &parent->rb_right;
}
rb_link_node(&new_buffer->rb_node, parent, p);
- rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
+ rb_insert_color(&new_buffer->rb_node, &proc->alloc.free_buffers);
}
static void binder_insert_allocated_buffer(struct binder_proc *proc,
struct binder_buffer *new_buffer)
{
- struct rb_node **p = &proc->allocated_buffers.rb_node;
+ struct rb_node **p = &proc->alloc.allocated_buffers.rb_node;
struct rb_node *parent = NULL;
struct binder_buffer *buffer;
BUG();
}
rb_link_node(&new_buffer->rb_node, parent, p);
- rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
+ rb_insert_color(&new_buffer->rb_node, &proc->alloc.allocated_buffers);
}
static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
uintptr_t user_ptr)
{
- struct rb_node *n = proc->allocated_buffers.rb_node;
+ struct rb_node *n = proc->alloc.allocated_buffers.rb_node;
struct binder_buffer *buffer;
struct binder_buffer *kern_ptr;
- kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
- - offsetof(struct binder_buffer, data));
+ kern_ptr = (struct binder_buffer *)
+ (user_ptr - proc->alloc.user_buffer_offset -
+ offsetof(struct binder_buffer, data));
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
if (mm) {
down_write(&mm->mmap_sem);
- vma = proc->vma;
- if (vma && mm != proc->vma_vm_mm) {
+ vma = proc->alloc.vma;
+ if (vma && mm != proc->alloc.vma_vm_mm) {
pr_err("%d: vma mm and task mm mismatch\n",
proc->pid);
vma = NULL;
for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
int ret;
- page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+ page = &proc->alloc.pages[
+ (page_addr - proc->alloc.buffer) / PAGE_SIZE];
BUG_ON(*page);
*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
goto err_map_kernel_failed;
}
user_page_addr =
- (uintptr_t)page_addr + proc->user_buffer_offset;
+ (uintptr_t)page_addr + proc->alloc.user_buffer_offset;
ret = vm_insert_page(vma, user_page_addr, page[0]);
if (ret) {
pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
free_range:
for (page_addr = end - PAGE_SIZE; page_addr >= start;
page_addr -= PAGE_SIZE) {
- page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+ page = &proc->alloc.pages[
+ (page_addr - proc->alloc.buffer) / PAGE_SIZE];
if (vma)
zap_page_range(vma, (uintptr_t)page_addr +
- proc->user_buffer_offset, PAGE_SIZE, NULL);
+ proc->alloc.user_buffer_offset,
+ PAGE_SIZE, NULL);
err_vm_insert_page_failed:
unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
size_t extra_buffers_size,
int is_async)
{
- struct rb_node *n = proc->free_buffers.rb_node;
+ struct rb_node *n = proc->alloc.free_buffers.rb_node;
struct binder_buffer *buffer;
size_t buffer_size;
struct rb_node *best_fit = NULL;
void *end_page_addr;
size_t size, data_offsets_size;
- if (proc->vma == NULL) {
+ if (proc->alloc.vma == NULL) {
pr_err("%d: binder_alloc_buf, no vma\n",
proc->pid);
return NULL;
return NULL;
}
if (is_async &&
- proc->free_async_space < size + sizeof(struct binder_buffer)) {
+ proc->alloc.free_async_space <
+ size + sizeof(struct binder_buffer)) {
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_alloc_buf size %zd failed, no async space left\n",
proc->pid, size);
(void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
return NULL;
- rb_erase(best_fit, &proc->free_buffers);
+ rb_erase(best_fit, &proc->alloc.free_buffers);
buffer->free = 0;
binder_insert_allocated_buffer(proc, buffer);
if (buffer_size != size) {
buffer->extra_buffers_size = extra_buffers_size;
buffer->async_transaction = is_async;
if (is_async) {
- proc->free_async_space -= size + sizeof(struct binder_buffer);
+ proc->alloc.free_async_space -=
+ size + sizeof(struct binder_buffer);
binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_alloc_buf size %zd async free %zd\n",
- proc->pid, size, proc->free_async_space);
+ proc->pid, size, proc->alloc.free_async_space);
}
return buffer;
int free_page_end = 1;
int free_page_start = 1;
- BUG_ON(proc->buffers.next == &buffer->entry);
+ BUG_ON(proc->alloc.buffers.next == &buffer->entry);
prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
BUG_ON(!prev->free);
if (buffer_end_page(prev) == buffer_start_page(buffer)) {
proc->pid, buffer, prev);
}
- if (!list_is_last(&buffer->entry, &proc->buffers)) {
+ if (!list_is_last(&buffer->entry, &proc->alloc.buffers)) {
next = list_entry(buffer->entry.next,
struct binder_buffer, entry);
if (buffer_start_page(next) == buffer_end_page(buffer)) {
BUG_ON(buffer->free);
BUG_ON(size > buffer_size);
BUG_ON(buffer->transaction != NULL);
- BUG_ON((void *)buffer < proc->buffer);
- BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
+ BUG_ON((void *)buffer < proc->alloc.buffer);
+ BUG_ON((void *)buffer > proc->alloc.buffer + proc->alloc.buffer_size);
if (buffer->async_transaction) {
- proc->free_async_space += size + sizeof(struct binder_buffer);
+ proc->alloc.free_async_space +=
+ size + sizeof(struct binder_buffer);
binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
"%d: binder_free_buf size %zd async free %zd\n",
- proc->pid, size, proc->free_async_space);
+ proc->pid, size, proc->alloc.free_async_space);
}
binder_update_page_range(proc, 0,
(void *)PAGE_ALIGN((uintptr_t)buffer->data),
(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
NULL);
- rb_erase(&buffer->rb_node, &proc->allocated_buffers);
+ rb_erase(&buffer->rb_node, &proc->alloc.allocated_buffers);
buffer->free = 1;
- if (!list_is_last(&buffer->entry, &proc->buffers)) {
+ if (!list_is_last(&buffer->entry, &proc->alloc.buffers)) {
struct binder_buffer *next = list_entry(buffer->entry.next,
struct binder_buffer, entry);
if (next->free) {
- rb_erase(&next->rb_node, &proc->free_buffers);
+ rb_erase(&next->rb_node, &proc->alloc.free_buffers);
binder_delete_free_buffer(proc, next);
}
}
- if (proc->buffers.next != &buffer->entry) {
+ if (proc->alloc.buffers.next != &buffer->entry) {
struct binder_buffer *prev = list_entry(buffer->entry.prev,
struct binder_buffer, entry);
if (prev->free) {
binder_delete_free_buffer(proc, buffer);
- rb_erase(&prev->rb_node, &proc->free_buffers);
+ rb_erase(&prev->rb_node, &proc->alloc.free_buffers);
buffer = prev;
}
}
* back to kernel address space to access it
*/
parent_buffer = parent->buffer -
- proc->user_buffer_offset;
+ proc->alloc.user_buffer_offset;
fd_buf_size = sizeof(u32) * fda->num_fds;
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
* Since the parent was already fixed up, convert it
* back to the kernel address space to access it
*/
- parent_buffer = parent->buffer - target_proc->user_buffer_offset;
+ parent_buffer = parent->buffer - target_proc->alloc.user_buffer_offset;
fd_array = (u32 *)(parent_buffer + fda->parent_offset);
if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
binder_user_error("%d:%d parent offset not aligned correctly.\n",
return -EINVAL;
}
parent_buffer = (u8 *)(parent->buffer -
- target_proc->user_buffer_offset);
+ target_proc->alloc.user_buffer_offset);
*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
return 0;
}
/* Fixup buffer pointer to target proc address space */
bp->buffer = (uintptr_t)sg_bufp +
- target_proc->user_buffer_offset;
+ target_proc->alloc.user_buffer_offset;
sg_bufp += ALIGN(bp->length, sizeof(u64));
ret = binder_fixup_parent(t, thread, bp, off_start,
tr.offsets_size = t->buffer->offsets_size;
tr.data.ptr.buffer = (binder_uintptr_t)(
(uintptr_t)t->buffer->data +
- proc->user_buffer_offset);
+ proc->alloc.user_buffer_offset);
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
proc->pid, vma->vm_start, vma->vm_end,
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
- proc->vma = NULL;
- proc->vma_vm_mm = NULL;
+ proc->alloc.vma = NULL;
+ proc->alloc.vma_vm_mm = NULL;
binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
mutex_lock(&binder_mmap_lock);
- if (proc->buffer) {
+ if (proc->alloc.buffer) {
ret = -EBUSY;
failure_string = "already mapped";
goto err_already_mapped;
failure_string = "get_vm_area";
goto err_get_vm_area_failed;
}
- proc->buffer = area->addr;
- proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
+ proc->alloc.buffer = area->addr;
+ proc->alloc.user_buffer_offset =
+ vma->vm_start - (uintptr_t)proc->alloc.buffer;
mutex_unlock(&binder_mmap_lock);
#ifdef CONFIG_CPU_CACHE_VIPT
if (cache_is_vipt_aliasing()) {
- while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
- pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
+ while (CACHE_COLOUR(
+ (vma->vm_start ^ (uint32_t)proc->alloc.buffer))) {
+ pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
+ __func__,
+ proc->pid, vma->vm_start,
+ vma->vm_end, proc->alloc.buffer);
vma->vm_start += PAGE_SIZE;
}
}
#endif
- proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
- if (proc->pages == NULL) {
+ proc->alloc.pages =
+ kzalloc(sizeof(proc->alloc.pages[0]) *
+ ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
+ GFP_KERNEL);
+ if (proc->alloc.pages == NULL) {
ret = -ENOMEM;
failure_string = "alloc page array";
goto err_alloc_pages_failed;
}
- proc->buffer_size = vma->vm_end - vma->vm_start;
+ proc->alloc.buffer_size = vma->vm_end - vma->vm_start;
vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;
- if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
+ if (binder_update_page_range(proc, 1, proc->alloc.buffer,
+ proc->alloc.buffer + PAGE_SIZE, vma)) {
ret = -ENOMEM;
failure_string = "alloc small buf";
goto err_alloc_small_buf_failed;
}
- buffer = proc->buffer;
- INIT_LIST_HEAD(&proc->buffers);
- list_add(&buffer->entry, &proc->buffers);
+ buffer = proc->alloc.buffer;
+ INIT_LIST_HEAD(&proc->alloc.buffers);
+ list_add(&buffer->entry, &proc->alloc.buffers);
buffer->free = 1;
binder_insert_free_buffer(proc, buffer);
- proc->free_async_space = proc->buffer_size / 2;
+ proc->alloc.free_async_space = proc->alloc.buffer_size / 2;
barrier();
proc->files = get_files_struct(current);
- proc->vma = vma;
- proc->vma_vm_mm = vma->vm_mm;
+ proc->alloc.vma = vma;
+ proc->alloc.vma_vm_mm = vma->vm_mm;
- /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
- proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
+ /*pr_info("binder_mmap: %d %lx-%lx maps %pK\n",
+ * proc->pid, vma->vm_start, vma->vm_end, proc->alloc.buffer);
+ */
return 0;
err_alloc_small_buf_failed:
- kfree(proc->pages);
- proc->pages = NULL;
+ kfree(proc->alloc.pages);
+ proc->alloc.pages = NULL;
err_alloc_pages_failed:
mutex_lock(&binder_mmap_lock);
- vfree(proc->buffer);
- proc->buffer = NULL;
+ vfree(proc->alloc.buffer);
+ proc->alloc.buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
mutex_unlock(&binder_mmap_lock);
int threads, nodes, incoming_refs, outgoing_refs, buffers,
active_transactions, page_count;
- BUG_ON(proc->vma);
+ BUG_ON(proc->alloc.vma);
BUG_ON(proc->files);
hlist_del(&proc->proc_node);
binder_release_work(&proc->delivered_death);
buffers = 0;
- while ((n = rb_first(&proc->allocated_buffers))) {
+ while ((n = rb_first(&proc->alloc.allocated_buffers))) {
struct binder_buffer *buffer;
buffer = rb_entry(n, struct binder_buffer, rb_node);
binder_stats_deleted(BINDER_STAT_PROC);
page_count = 0;
- if (proc->pages) {
+ if (proc->alloc.pages) {
int i;
- for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
+ for (i = 0; i < proc->alloc.buffer_size / PAGE_SIZE; i++) {
void *page_addr;
- if (!proc->pages[i])
+ if (!proc->alloc.pages[i])
continue;
- page_addr = proc->buffer + i * PAGE_SIZE;
+ page_addr = proc->alloc.buffer + i * PAGE_SIZE;
binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%s: %d: page %d at %p not freed\n",
__func__, proc->pid, i, page_addr);
unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
- __free_page(proc->pages[i]);
+ __free_page(proc->alloc.pages[i]);
page_count++;
}
- kfree(proc->pages);
- vfree(proc->buffer);
+ kfree(proc->alloc.pages);
+ vfree(proc->alloc.buffer);
}
put_task_struct(proc->tsk);
print_binder_ref(m, rb_entry(n, struct binder_ref,
rb_node_desc));
}
- for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
+ for (n = rb_first(&proc->alloc.allocated_buffers);
+ n != NULL; n = rb_next(n))
print_binder_buffer(m, " buffer",
rb_entry(n, struct binder_buffer, rb_node));
list_for_each_entry(w, &proc->todo, entry)
" ready threads %d\n"
" free async space %zd\n", proc->requested_threads,
proc->requested_threads_started, proc->max_threads,
- proc->ready_threads, proc->free_async_space);
+ proc->ready_threads, proc->alloc.free_async_space);
count = 0;
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
count++;
seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
count = 0;
- for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
+ for (n = rb_first(&proc->alloc.allocated_buffers);
+ n != NULL; n = rb_next(n))
count++;
seq_printf(m, " buffers: %d\n", count);