Revert "FROMLIST: android: binder: Move buffer out of area shared with user space"
drivers/android/binder_alloc.c
/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
        BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
        BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
        BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask;

module_param_named(debug_mask, binder_alloc_debug_mask,
                   uint, S_IWUSR | S_IRUGO);

#define binder_alloc_debug(mask, x...) \
        do { \
                if (binder_alloc_debug_mask & mask) \
                        pr_info(x); \
        } while (0)

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
        return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
        return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

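/*
 * Buffers are carved out of one contiguous area and their struct
 * binder_buffer headers live inline, so the usable size of a buffer is the
 * distance from its data[] to the next buffer header (or, for the last
 * buffer in the list, to the end of the mapped area).
 */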
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
                                       struct binder_buffer *buffer)
{
        if (list_is_last(&buffer->entry, &alloc->buffers))
                return alloc->buffer +
                       alloc->buffer_size - (void *)buffer->data;
        return (size_t)binder_buffer_next(buffer) - (size_t)buffer->data;
}

static void binder_insert_free_buffer(struct binder_alloc *alloc,
                                      struct binder_buffer *new_buffer)
{
        struct rb_node **p = &alloc->free_buffers.rb_node;
        struct rb_node *parent = NULL;
        struct binder_buffer *buffer;
        size_t buffer_size;
        size_t new_buffer_size;

        BUG_ON(!new_buffer->free);

        new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: add free buffer, size %zd, at %pK\n",
                           alloc->pid, new_buffer_size, new_buffer);

        while (*p) {
                parent = *p;
                buffer = rb_entry(parent, struct binder_buffer, rb_node);
                BUG_ON(!buffer->free);

                buffer_size = binder_alloc_buffer_size(alloc, buffer);

                if (new_buffer_size < buffer_size)
                        p = &parent->rb_left;
                else
                        p = &parent->rb_right;
        }
        rb_link_node(&new_buffer->rb_node, parent, p);
        rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
                struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
        struct rb_node **p = &alloc->allocated_buffers.rb_node;
        struct rb_node *parent = NULL;
        struct binder_buffer *buffer;

        BUG_ON(new_buffer->free);

        while (*p) {
                parent = *p;
                buffer = rb_entry(parent, struct binder_buffer, rb_node);
                BUG_ON(buffer->free);

                if (new_buffer < buffer)
                        p = &parent->rb_left;
                else if (new_buffer > buffer)
                        p = &parent->rb_right;
                else
                        BUG();
        }
        rb_link_node(&new_buffer->rb_node, parent, p);
        rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

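/*
 * Because struct binder_buffer sits in the area that is also mapped into
 * user space, the kernel address of the buffer header can be recovered from
 * the user data pointer: subtract user_buffer_offset to translate the user
 * address back into the kernel mapping, then subtract
 * offsetof(struct binder_buffer, data) to land on the header itself.
 */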
static struct binder_buffer *binder_alloc_prepare_to_free_locked(
                struct binder_alloc *alloc,
                uintptr_t user_ptr)
{
        struct rb_node *n = alloc->allocated_buffers.rb_node;
        struct binder_buffer *buffer;
        struct binder_buffer *kern_ptr;

        kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
                - offsetof(struct binder_buffer, data));

        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                BUG_ON(buffer->free);

                if (kern_ptr < buffer)
                        n = n->rb_left;
                else if (kern_ptr > buffer)
                        n = n->rb_right;
                else {
                        /*
                         * Guard against user threads attempting to
                         * free the buffer when in use by kernel or
                         * after it's already been freed.
                         */
                        if (!buffer->allow_user_free)
                                return ERR_PTR(-EPERM);
                        buffer->allow_user_free = 0;
                        return buffer;
                }
        }
        return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for buffer that matches user data
 * pointer.
 *
 * Return:	Pointer to buffer, NULL if no match, or ERR_PTR(-EPERM) if the
 *		buffer is not allowed to be freed by user space
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
                                                   uintptr_t user_ptr)
{
        struct binder_buffer *buffer;

        mutex_lock(&alloc->mutex);
        buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
        mutex_unlock(&alloc->mutex);
        return buffer;
}

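/*
 * binder_update_page_range() - allocate (allocate == 1) or free
 * (allocate == 0) the physical pages backing [start, end) of the binder
 * buffer area. On allocation each page is mapped twice: into the kernel
 * vmalloc area and, via vm_insert_page(), into the target process's vma at
 * the address offset by user_buffer_offset.
 */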
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
                                    void *start, void *end,
                                    struct vm_area_struct *vma)
{
        void *page_addr;
        unsigned long user_page_addr;
        struct page **page;
        struct mm_struct *mm;

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: %s pages %pK-%pK\n", alloc->pid,
                           allocate ? "allocate" : "free", start, end);

        if (end <= start)
                return 0;

        trace_binder_update_page_range(alloc, allocate, start, end);

        if (vma)
                mm = NULL;
        else
                mm = get_task_mm(alloc->tsk);

        if (mm) {
                down_write(&mm->mmap_sem);
                vma = alloc->vma;
                if (vma && mm != alloc->vma_vm_mm) {
                        pr_err("%d: vma mm and task mm mismatch\n",
                               alloc->pid);
                        vma = NULL;
                }
        }

        if (allocate == 0)
                goto free_range;

        if (vma == NULL) {
                pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
                       alloc->pid);
                goto err_no_vma;
        }

        for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
                int ret;

                page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];

                BUG_ON(*page);
                *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
                if (*page == NULL) {
                        pr_err("%d: binder_alloc_buf failed for page at %pK\n",
                               alloc->pid, page_addr);
                        goto err_alloc_page_failed;
                }
                ret = map_kernel_range_noflush((unsigned long)page_addr,
                                               PAGE_SIZE, PAGE_KERNEL, page);
                flush_cache_vmap((unsigned long)page_addr,
                                 (unsigned long)page_addr + PAGE_SIZE);
                if (ret != 1) {
                        pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
                               alloc->pid, page_addr);
                        goto err_map_kernel_failed;
                }
                user_page_addr =
                        (uintptr_t)page_addr + alloc->user_buffer_offset;
                ret = vm_insert_page(vma, user_page_addr, page[0]);
                if (ret) {
                        pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
                               alloc->pid, user_page_addr);
                        goto err_vm_insert_page_failed;
                }
                /* vm_insert_page does not seem to increment the refcount */
        }
        if (mm) {
                up_write(&mm->mmap_sem);
                mmput(mm);
        }
        return 0;

free_range:
        for (page_addr = end - PAGE_SIZE; page_addr >= start;
             page_addr -= PAGE_SIZE) {
                page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
                if (vma)
                        zap_page_range(vma, (uintptr_t)page_addr +
                                       alloc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
                unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
                __free_page(*page);
                *page = NULL;
err_alloc_page_failed:
                ;
        }
err_no_vma:
        if (mm) {
                up_write(&mm->mmap_sem);
                mmput(mm);
        }
        return vma ? -ENOMEM : -ESRCH;
}

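/*
 * Best-fit allocation over the size-ordered free_buffers tree: take the
 * smallest free buffer that can hold data + offsets + extra (each aligned
 * to pointer size). If no exact-size match exists, the smallest larger free
 * buffer is used and, when the leftover space exceeds
 * sizeof(struct binder_buffer) + 4, the tail is split off and reinserted as
 * a new free buffer.
 */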
struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
                                                  size_t data_size,
                                                  size_t offsets_size,
                                                  size_t extra_buffers_size,
                                                  int is_async)
{
        struct rb_node *n = alloc->free_buffers.rb_node;
        struct binder_buffer *buffer;
        size_t buffer_size;
        struct rb_node *best_fit = NULL;
        void *has_page_addr;
        void *end_page_addr;
        size_t size, data_offsets_size;
        int ret;

        if (alloc->vma == NULL) {
                pr_err("%d: binder_alloc_buf, no vma\n",
                       alloc->pid);
                return ERR_PTR(-ESRCH);
        }

        data_offsets_size = ALIGN(data_size, sizeof(void *)) +
                ALIGN(offsets_size, sizeof(void *));

        if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: got transaction with invalid size %zd-%zd\n",
                                   alloc->pid, data_size, offsets_size);
                return ERR_PTR(-EINVAL);
        }
        size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
        if (size < data_offsets_size || size < extra_buffers_size) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: got transaction with invalid extra_buffers_size %zd\n",
                                   alloc->pid, extra_buffers_size);
                return ERR_PTR(-EINVAL);
        }
        if (is_async &&
            alloc->free_async_space < size + sizeof(struct binder_buffer)) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: binder_alloc_buf size %zd failed, no async space left\n",
                                   alloc->pid, size);
                return ERR_PTR(-ENOSPC);
        }

        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                BUG_ON(!buffer->free);
                buffer_size = binder_alloc_buffer_size(alloc, buffer);

                if (size < buffer_size) {
                        best_fit = n;
                        n = n->rb_left;
                } else if (size > buffer_size)
                        n = n->rb_right;
                else {
                        best_fit = n;
                        break;
                }
        }
        if (best_fit == NULL) {
                size_t allocated_buffers = 0;
                size_t largest_alloc_size = 0;
                size_t total_alloc_size = 0;
                size_t free_buffers = 0;
                size_t largest_free_size = 0;
                size_t total_free_size = 0;

                for (n = rb_first(&alloc->allocated_buffers); n != NULL;
                     n = rb_next(n)) {
                        buffer = rb_entry(n, struct binder_buffer, rb_node);
                        buffer_size = binder_alloc_buffer_size(alloc, buffer);
                        allocated_buffers++;
                        total_alloc_size += buffer_size;
                        if (buffer_size > largest_alloc_size)
                                largest_alloc_size = buffer_size;
                }
                for (n = rb_first(&alloc->free_buffers); n != NULL;
                     n = rb_next(n)) {
                        buffer = rb_entry(n, struct binder_buffer, rb_node);
                        buffer_size = binder_alloc_buffer_size(alloc, buffer);
                        free_buffers++;
                        total_free_size += buffer_size;
                        if (buffer_size > largest_free_size)
                                largest_free_size = buffer_size;
                }
                pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
                       alloc->pid, size);
                pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
                       total_alloc_size, allocated_buffers, largest_alloc_size,
                       total_free_size, free_buffers, largest_free_size);
                return ERR_PTR(-ENOSPC);
        }
        if (n == NULL) {
                buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
                buffer_size = binder_alloc_buffer_size(alloc, buffer);
        }

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
                           alloc->pid, size, buffer, buffer_size);

        has_page_addr =
                (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
        if (n == NULL) {
                if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
                        buffer_size = size; /* no room for other buffers */
                else
                        buffer_size = size + sizeof(struct binder_buffer);
        }
        end_page_addr =
                (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
        if (end_page_addr > has_page_addr)
                end_page_addr = has_page_addr;
        ret = binder_update_page_range(alloc, 1,
                (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
        if (ret)
                return ERR_PTR(ret);

        rb_erase(best_fit, &alloc->free_buffers);
        buffer->free = 0;
        binder_insert_allocated_buffer_locked(alloc, buffer);
        if (buffer_size != size) {
                struct binder_buffer *new_buffer = (void *)buffer->data + size;

                list_add(&new_buffer->entry, &buffer->entry);
                new_buffer->free = 1;
                binder_insert_free_buffer(alloc, new_buffer);
        }

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: binder_alloc_buf size %zd got %pK\n",
                           alloc->pid, size, buffer);
        buffer->data_size = data_size;
        buffer->offsets_size = offsets_size;
        buffer->async_transaction = is_async;
        buffer->extra_buffers_size = extra_buffers_size;
        if (is_async) {
                alloc->free_async_space -= size + sizeof(struct binder_buffer);
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                                   "%d: binder_alloc_buf size %zd async free %zd\n",
                                   alloc->pid, size, alloc->free_async_space);
        }
        return buffer;
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to
 * pointer-sized boundary)
 *
 * Return: The allocated buffer or %ERR_PTR(-errno) if error
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
                                           size_t data_size,
                                           size_t offsets_size,
                                           size_t extra_buffers_size,
                                           int is_async)
{
        struct binder_buffer *buffer;

        mutex_lock(&alloc->mutex);
        buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
                                             extra_buffers_size, is_async);
        mutex_unlock(&alloc->mutex);
        return buffer;
}

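/*
 * buffer_start_page()/buffer_end_page() - the page containing the first and
 * last byte of the struct binder_buffer header itself. Used below to decide
 * whether a freed buffer's header shares a page with its neighbours.
 */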
static void *buffer_start_page(struct binder_buffer *buffer)
{
        return (void *)((uintptr_t)buffer & PAGE_MASK);
}

static void *buffer_end_page(struct binder_buffer *buffer)
{
        return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
}

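/*
 * binder_delete_free_buffer() - remove a buffer entry whose space is being
 * merged into an adjacent free buffer, and release whichever of its
 * header's start/end pages is not shared with the neighbouring buffers.
 */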
static void binder_delete_free_buffer(struct binder_alloc *alloc,
                                      struct binder_buffer *buffer)
{
        struct binder_buffer *prev, *next = NULL;
        int free_page_end = 1;
        int free_page_start = 1;

        BUG_ON(alloc->buffers.next == &buffer->entry);
        prev = binder_buffer_prev(buffer);
        BUG_ON(!prev->free);
        if (buffer_end_page(prev) == buffer_start_page(buffer)) {
                free_page_start = 0;
                if (buffer_end_page(prev) == buffer_end_page(buffer))
                        free_page_end = 0;
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer %pK share page with %pK\n",
                                   alloc->pid, buffer, prev);
        }

        if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                next = binder_buffer_next(buffer);
                if (buffer_start_page(next) == buffer_end_page(buffer)) {
                        free_page_end = 0;
                        if (buffer_start_page(next) ==
                            buffer_start_page(buffer))
                                free_page_start = 0;
                        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                           "%d: merge free, buffer %pK share page with %pK\n",
                                           alloc->pid, buffer, next);
                }
        }
        list_del(&buffer->entry);
        if (free_page_start || free_page_end) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
                                   alloc->pid, buffer, free_page_start ? "" : " end",
                                   free_page_end ? "" : " start", prev, next);
                binder_update_page_range(alloc, 0, free_page_start ?
                        buffer_start_page(buffer) : buffer_end_page(buffer),
                        (free_page_end ? buffer_end_page(buffer) :
                        buffer_start_page(buffer)) + PAGE_SIZE, NULL);
        }
}

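/*
 * binder_free_buf_locked() - return a buffer to the free tree. The pages
 * fully covered by the buffer's data are unmapped, the buffer is coalesced
 * with a free successor and/or predecessor, and the resulting free chunk is
 * reinserted into the size-ordered free_buffers tree.
 */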
static void binder_free_buf_locked(struct binder_alloc *alloc,
                                   struct binder_buffer *buffer)
{
        size_t size, buffer_size;

        buffer_size = binder_alloc_buffer_size(alloc, buffer);

        size = ALIGN(buffer->data_size, sizeof(void *)) +
               ALIGN(buffer->offsets_size, sizeof(void *)) +
               ALIGN(buffer->extra_buffers_size, sizeof(void *));

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
                           alloc->pid, buffer, size, buffer_size);

        BUG_ON(buffer->free);
        BUG_ON(size > buffer_size);
        BUG_ON(buffer->transaction != NULL);
        BUG_ON((void *)buffer < alloc->buffer);
        BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);

        if (buffer->async_transaction) {
                alloc->free_async_space += size + sizeof(struct binder_buffer);

                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                                   "%d: binder_free_buf size %zd async free %zd\n",
                                   alloc->pid, size, alloc->free_async_space);
        }

        binder_update_page_range(alloc, 0,
                (void *)PAGE_ALIGN((uintptr_t)buffer->data),
                (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
                NULL);

        rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
        buffer->free = 1;
        if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                struct binder_buffer *next = binder_buffer_next(buffer);

                if (next->free) {
                        rb_erase(&next->rb_node, &alloc->free_buffers);
                        binder_delete_free_buffer(alloc, next);
                }
        }
        if (alloc->buffers.next != &buffer->entry) {
                struct binder_buffer *prev = binder_buffer_prev(buffer);

                if (prev->free) {
                        binder_delete_free_buffer(alloc, buffer);
                        rb_erase(&prev->rb_node, &alloc->free_buffers);
                        buffer = prev;
                }
        }
        binder_insert_free_buffer(alloc, buffer);
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:	binder_alloc for this proc
 * @buffer:	kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
                           struct binder_buffer *buffer)
{
        mutex_lock(&alloc->mutex);
        binder_free_buf_locked(alloc, buffer);
        mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:	alloc structure for this proc
 * @vma:	vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                              struct vm_area_struct *vma)
{
        int ret;
        struct vm_struct *area;
        const char *failure_string;
        struct binder_buffer *buffer;

        mutex_lock(&binder_alloc_mmap_lock);
        if (alloc->buffer) {
                ret = -EBUSY;
                failure_string = "already mapped";
                goto err_already_mapped;
        }

        area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
        if (area == NULL) {
                ret = -ENOMEM;
                failure_string = "get_vm_area";
                goto err_get_vm_area_failed;
        }
        alloc->buffer = area->addr;
        alloc->user_buffer_offset =
                vma->vm_start - (uintptr_t)alloc->buffer;
        mutex_unlock(&binder_alloc_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
        if (cache_is_vipt_aliasing()) {
                while (CACHE_COLOUR(
                                (vma->vm_start ^ (uint32_t)alloc->buffer))) {
                        pr_info("binder_mmap: %d %lx-%lx maps %pK bad alignment\n",
                                alloc->pid, vma->vm_start, vma->vm_end,
                                alloc->buffer);
                        vma->vm_start += PAGE_SIZE;
                }
        }
#endif
        alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
                               ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
                               GFP_KERNEL);
        if (alloc->pages == NULL) {
                ret = -ENOMEM;
                failure_string = "alloc page array";
                goto err_alloc_pages_failed;
        }
        alloc->buffer_size = vma->vm_end - vma->vm_start;

        if (binder_update_page_range(alloc, 1, alloc->buffer,
                                     alloc->buffer + PAGE_SIZE, vma)) {
                ret = -ENOMEM;
                failure_string = "alloc small buf";
                goto err_alloc_small_buf_failed;
        }
        buffer = alloc->buffer;
        INIT_LIST_HEAD(&alloc->buffers);
        list_add(&buffer->entry, &alloc->buffers);
        buffer->free = 1;
        binder_insert_free_buffer(alloc, buffer);
        alloc->free_async_space = alloc->buffer_size / 2;
        barrier();
        alloc->vma = vma;
        alloc->vma_vm_mm = vma->vm_mm;

        return 0;

err_alloc_small_buf_failed:
        kfree(alloc->pages);
        alloc->pages = NULL;
err_alloc_pages_failed:
        mutex_lock(&binder_alloc_mmap_lock);
        vfree(alloc->buffer);
        alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
        mutex_unlock(&binder_alloc_mmap_lock);
        pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
               alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
        return ret;
}

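/**
 * binder_alloc_deferred_release() - free all remaining buffers and pages
 * @alloc:	binder_alloc for this proc
 *
 * Frees any buffer still in the allocated tree (the transactions using them
 * must already have been released), then unmaps and frees every page still
 * backing the buffer area along with the page array and the kernel mapping.
 * alloc->vma must already be NULL (cleared by binder_alloc_vma_close() or
 * never mapped).
 */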
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
        struct rb_node *n;
        int buffers, page_count;

        BUG_ON(alloc->vma);

        buffers = 0;
        mutex_lock(&alloc->mutex);
        while ((n = rb_first(&alloc->allocated_buffers))) {
                struct binder_buffer *buffer;

                buffer = rb_entry(n, struct binder_buffer, rb_node);

                /* Transaction should already have been freed */
                BUG_ON(buffer->transaction);

                binder_free_buf_locked(alloc, buffer);
                buffers++;
        }

        page_count = 0;
        if (alloc->pages) {
                int i;

                for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
                        void *page_addr;

                        if (!alloc->pages[i])
                                continue;

                        page_addr = alloc->buffer + i * PAGE_SIZE;
                        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                           "%s: %d: page %d at %pK not freed\n",
                                           __func__, alloc->pid, i, page_addr);
                        unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
                        __free_page(alloc->pages[i]);
                        page_count++;
                }
                kfree(alloc->pages);
                vfree(alloc->buffer);
        }
        mutex_unlock(&alloc->mutex);

        binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
                           "%s: %d buffers %d, pages %d\n",
                           __func__, alloc->pid, buffers, page_count);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
                                struct binder_buffer *buffer)
{
        seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
                   prefix, buffer->debug_id, buffer->data,
                   buffer->data_size, buffer->offsets_size,
                   buffer->extra_buffers_size,
                   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
                                  struct binder_alloc *alloc)
{
        struct rb_node *n;

        mutex_lock(&alloc->mutex);
        for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
                print_binder_buffer(m, "  buffer",
                                    rb_entry(n, struct binder_buffer, rb_node));
        mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
        struct rb_node *n;
        int count = 0;

        mutex_lock(&alloc->mutex);
        for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
                count++;
        mutex_unlock(&alloc->mutex);
        return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
        WRITE_ONCE(alloc->vma, NULL);
        WRITE_ONCE(alloc->vma_vm_mm, NULL);
}

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
        alloc->tsk = current->group_leader;
        alloc->pid = current->group_leader->pid;
        mutex_init(&alloc->mutex);
}
806