/*
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, S_IWUSR | S_IRUGO);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info(x); \
	} while (0)
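
/*
 * The debug mask above is runtime-writable through the module parameter.
 * A usage sketch (the sysfs path assumes this object's KBUILD_MODNAME is
 * "binder_alloc"; it may differ depending on how the binder objects are
 * built):
 *
 *	# enable BUFFER_ALLOC and BUFFER_ALLOC_ASYNC: (1U<<2)|(1U<<3) == 12
 *	echo 12 > /sys/module/binder_alloc/parameters/debug_mask
 */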

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return alloc->buffer +
		       alloc->buffer_size - (void *)buffer->data;
	return (size_t)binder_buffer_next(buffer) - (size_t)buffer->data;
}
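
/*
 * Layout note (restating what the size computation above relies on):
 * binder_buffer headers live inline in the mapped region, each
 * immediately followed by its data, with buffers kept in address order
 * on alloc->buffers.  A buffer's usable size is therefore the distance
 * from its data[] to the next header (or to the end of the region for
 * the last buffer):
 *
 *	[hdr A][data A ......][hdr B][data B ...][end of region]
 *	        `-- size of A == (char *)B - (char *)A->data
 */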

static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: add free buffer, size %zd, at %pK\n",
		      alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}
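
/*
 * Design note: two rb-trees are kept per proc.  free_buffers (populated
 * above) is keyed by buffer *size*, so allocation can run a best-fit
 * search; allocated_buffers (populated below) is keyed by buffer
 * *address*, so an in-flight buffer can be found from a user pointer.
 */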

static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer < buffer)
			p = &parent->rb_left;
		else if (new_buffer > buffer)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	struct binder_buffer *kern_ptr;

	kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
		- offsetof(struct binder_buffer, data));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer)
			n = n->rb_left;
		else if (kern_ptr > buffer)
			n = n->rb_right;
		else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer when in use by kernel or
			 * after it's already been freed.
			 */
			if (!buffer->allow_user_free)
				return ERR_PTR(-EPERM);
			buffer->allow_user_free = 0;
			return buffer;
		}
	}
	return NULL;
}
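
/*
 * The pointer math above, spelled out: the kernel and user mappings of
 * the buffer region differ by the constant alloc->user_buffer_offset,
 * and data[] sits offsetof(struct binder_buffer, data) bytes past the
 * header.  So, starting from a user data pointer:
 *
 *	kern_data = user_ptr - alloc->user_buffer_offset;
 *	header    = kern_data - offsetof(struct binder_buffer, data);
 */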

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for buffer that matches user data
 * pointer.
 *
 * Return:	Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct page **page;
	struct mm_struct *mm;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: %s pages %pK-%pK\n", alloc->pid,
		     allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(alloc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = alloc->vma;
		if (vma && mm != alloc->vma_vm_mm) {
			pr_err("%d: vma mm and task mm mismatch\n",
				alloc->pid);
			vma = NULL;
		}
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
			alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;

		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (*page == NULL) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
				alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		ret = map_kernel_range_noflush((unsigned long)page_addr,
					       PAGE_SIZE, PAGE_KERNEL, page);
		flush_cache_vmap((unsigned long)page_addr,
				(unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
			       alloc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + alloc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				alloc->user_buffer_offset, PAGE_SIZE, NULL);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}
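
/*
 * In the allocate path above, every backing page ends up mapped twice:
 * once into kernel vmalloc space (map_kernel_range_noflush) and once
 * into the target process's vma (vm_insert_page) at a fixed offset.
 * This is what enables binder's single-copy IPC: the kernel copies
 * transaction data from the sender into the kernel mapping, and the
 * receiver reads the same physical pages through its user mapping.
 */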

static struct binder_buffer *binder_alloc_new_buf_locked(
					struct binder_alloc *alloc,
					size_t data_size,
					size_t offsets_size,
					size_t extra_buffers_size,
					int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size, data_offsets_size;
	int ret;

	if (alloc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				"%d: got transaction with invalid size %zd-%zd\n",
				alloc->pid, data_size, offsets_size);
		return ERR_PTR(-EINVAL);
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				"%d: got transaction with invalid extra_buffers_size %zd\n",
				alloc->pid, extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
			      alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		size_t allocated_buffers = 0;
		size_t largest_alloc_size = 0;
		size_t total_alloc_size = 0;
		size_t free_buffers = 0;
		size_t largest_free_size = 0;
		size_t total_free_size = 0;

		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			allocated_buffers++;
			total_alloc_size += buffer_size;
			if (buffer_size > largest_alloc_size)
				largest_alloc_size = buffer_size;
		}
		for (n = rb_first(&alloc->free_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			free_buffers++;
			total_free_size += buffer_size;
			if (buffer_size > largest_free_size)
				largest_free_size = buffer_size;
		}
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
			alloc->pid, size);
		pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
		       total_alloc_size, allocated_buffers, largest_alloc_size,
		       total_free_size, free_buffers, largest_free_size);
		return ERR_PTR(-ENOSPC);
	}

	buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
		      alloc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	if (n == NULL) {
		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
			buffer_size = size; /* no room for other buffers */
		else
			buffer_size = size + sizeof(struct binder_buffer);
	}
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_update_page_range(alloc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
	if (ret)
		return ERR_PTR(ret);

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	if (buffer_size != size) {
		struct binder_buffer *new_buffer = (void *)buffer->data + size;

		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got %pK\n",
		      alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_alloc_buf size %zd async free %zd\n",
			      alloc->pid, size, alloc->free_async_space);
	}
	return buffer;
}
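
/*
 * Allocation strategy above, summarized: best-fit search of the
 * size-ordered free tree; if the chosen buffer is large enough to also
 * hold another binder_buffer header plus a little slack, it is split
 * and the tail is returned to the free tree.  Only the whole pages the
 * new buffer actually spans get backed via binder_update_page_range().
 */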

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to
 * pointer-sized boundary)
 *
 * Return:	The allocated buffer or %NULL if error
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async);
	mutex_unlock(&alloc->mutex);
	return buffer;
}
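
/*
 * Caller sketch (simplified from binder_transaction() in binder.c; the
 * variable names here are illustrative): a transaction allocates from
 * the *target* proc's binder_alloc, with is_async set for one-way
 * calls:
 *
 *	t->buffer = binder_alloc_new_buf(&target_proc->alloc,
 *					 tr->data_size, tr->offsets_size,
 *					 extra_buffers_size,
 *					 !reply && (t->flags & TF_ONE_WAY));
 *	if (IS_ERR(t->buffer))
 *		// fail the transaction with PTR_ERR(t->buffer)
 */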

static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer & PAGE_MASK);
}

static void *buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
}

static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	int free_page_end = 1;
	int free_page_start = 1;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
		free_page_start = 0;
		if (buffer_end_page(prev) == buffer_end_page(buffer))
			free_page_end = 0;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: merge free, buffer %pK share page with %pK\n",
			      alloc->pid, buffer, prev);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_end_page(buffer)) {
			free_page_end = 0;
			if (buffer_start_page(next) ==
			    buffer_start_page(buffer))
				free_page_start = 0;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%d: merge free, buffer %pK share page with %pK\n",
				      alloc->pid, buffer, prev);
		}
	}
	list_del(&buffer->entry);
	if (free_page_start || free_page_end) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
			     alloc->pid, buffer, free_page_start ? "" : " end",
			     free_page_end ? "" : " start", prev, next);
		binder_update_page_range(alloc, 0, free_page_start ?
			buffer_start_page(buffer) : buffer_end_page(buffer),
			(free_page_end ? buffer_end_page(buffer) :
			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
	}
}
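
/*
 * The free_page_start/free_page_end flags computed above answer one
 * question per edge: does the page at that edge of the dead header also
 * hold part of an adjacent buffer that stays on the list?  Only pages
 * that are not shared with a neighbour are handed to
 * binder_update_page_range() for unmapping.
 */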

static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
		      alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON((void *)buffer < alloc->buffer);
	BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += size + sizeof(struct binder_buffer);

		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			      alloc->pid, size, alloc->free_async_space);
	}

	binder_update_page_range(alloc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}
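
/*
 * Coalescing above: a freed buffer absorbs a free successor (the
 * successor's header is deleted) and is itself absorbed into a free
 * predecessor, so no two adjacent free buffers ever coexist.  The
 * merged result is then re-inserted into the size-ordered free tree.
 */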

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:	binder_alloc for this proc
 * @buffer:	kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:	alloc structure for this proc
 * @vma:	vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	const char *failure_string;
	struct binder_buffer *buffer;

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	alloc->buffer = area->addr;
	alloc->user_buffer_offset =
		vma->vm_start - (uintptr_t)alloc->buffer;
	mutex_unlock(&binder_alloc_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR(
				(vma->vm_start ^ (uint32_t)alloc->buffer))) {
			pr_info("binder_mmap: %d %lx-%lx maps %pK bad alignment\n",
				alloc->pid, vma->vm_start, vma->vm_end,
				alloc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
				   ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	alloc->buffer_size = vma->vm_end - vma->vm_start;

	if (binder_update_page_range(alloc, 1, alloc->buffer,
				     alloc->buffer + PAGE_SIZE, vma)) {
		ret = -ENOMEM;
		failure_string = "alloc small buf";
		goto err_alloc_small_buf_failed;
	}
	buffer = alloc->buffer;
	INIT_LIST_HEAD(&alloc->buffers);
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;
	barrier();
	alloc->vma = vma;
	alloc->vma_vm_mm = vma->vm_mm;

	return 0;

err_alloc_small_buf_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_alloc_mmap_lock);
	vfree(alloc->buffer);
	alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
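
/*
 * Note on the setup above: only the first page is backed eagerly, just
 * enough for the initial free binder_buffer header that describes the
 * whole region; the rest is mapped on demand by
 * binder_update_page_range() as buffers are allocated.  The
 * user_buffer_offset captured here fixes the constant delta between
 * the kernel mapping (area->addr) and the user mapping (vma->vm_start)
 * for the lifetime of the mapping.
 */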

void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;

	BUG_ON(alloc->vma);

	buffers = 0;
	mutex_lock(&alloc->mutex);
	while ((n = rb_first(&alloc->allocated_buffers))) {
		struct binder_buffer *buffer;

		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			void *page_addr;

			if (!alloc->pages[i])
				continue;

			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%s: %d: page %d at %pK not freed\n",
				     __func__, alloc->pid, i, page_addr);
			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
			__free_page(alloc->pages[i]);
			page_count++;
		}
		kfree(alloc->pages);
		vfree(alloc->buffer);
	}
	mutex_unlock(&alloc->mutex);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d buffers %d, pages %d\n",
		     __func__, alloc->pid, buffers, page_count);
}
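
/*
 * This runs from binder's deferred-release path in binder.c, after the
 * proc's file has been released; the vma is already gone by then (hence
 * the BUG_ON above), so leftover pages are unmapped and freed directly.
 */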

static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->extra_buffers_size,
		   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, "  buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	WRITE_ONCE(alloc->vma, NULL);
	WRITE_ONCE(alloc->vma_vm_mm, NULL);
}

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->tsk = current->group_leader;
	alloc->pid = current->group_leader->pid;
	mutex_init(&alloc->mutex);
}