/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include "binder_alloc.h"
#include "binder_trace.h"
struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, S_IWUSR | S_IRUGO);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info(x); \
	} while (0)
static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return (u8 *)alloc->buffer +
			alloc->buffer_size - (u8 *)buffer->data;
	return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
}
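
/*
 * Note: a buffer's size is never stored; it is implied by the gap between
 * its data pointer and the data pointer of the next buffer in the
 * address-ordered ->buffers list (or by the end of the mapping for the
 * last buffer). For example, two buffers whose data pointers are 64 bytes
 * apart give the first buffer a size of 64 bytes.
 */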
static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}
static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->data < buffer->data)
			p = &parent->rb_left;
		else if (new_buffer->data > buffer->data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}
static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	void *kern_ptr;

	kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer->data)
			n = n->rb_left;
		else if (kern_ptr > buffer->data)
			n = n->rb_right;
		else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer when in use by kernel or
			 * after it's already been freed.
			 */
			if (!buffer->allow_user_free)
				return ERR_PTR(-EPERM);
			buffer->allow_user_free = 0;
			return buffer;
		}
	}
	return NULL;
}
/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for buffer that matches user data
 * pointer.
 *
 * Return:	Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}
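
/*
 * The allow_user_free flag is cleared above while alloc->mutex is held, so a
 * repeated free of the same buffer address (typically reached via the
 * BC_FREE_BUFFER command), or a free racing with the kernel still using the
 * buffer, fails with -EPERM instead of corrupting the allocator state.
 */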
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void *start, void *end)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct binder_lru_page *page;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = NULL;
	bool need_mm = false;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: %s pages %pK-%pK\n", alloc->pid,
			   allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (allocate == 0)
		goto free_range;

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (!page->page_ptr) {
			need_mm = true;
			break;
		}
	}

	/* Same as mmget_not_zero() in later kernel versions */
	if (need_mm && atomic_inc_not_zero(&alloc->vma_vm_mm->mm_users))
		mm = alloc->vma_vm_mm;

	if (mm) {
		down_read(&mm->mmap_sem);
		vma = alloc->vma;
	}

	if (!vma && need_mm) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
		       alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		bool on_lru;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (page->page_ptr) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (WARN_ON(!vma))
			goto err_page_ptr_cleared;

		trace_binder_alloc_page_start(alloc, index);
		page->page_ptr = alloc_page(GFP_KERNEL |
					    __GFP_HIGHMEM |
					    __GFP_ZERO);
		if (!page->page_ptr) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
			       alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		page->alloc = alloc;
		INIT_LIST_HEAD(&page->lru);

		ret = map_kernel_range_noflush((unsigned long)page_addr,
					       PAGE_SIZE, PAGE_KERNEL,
					       &page->page_ptr);
		flush_cache_vmap((unsigned long)page_addr,
				 (unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
			       alloc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + alloc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}

		if (index + 1 > alloc->pages_high)
			alloc->pages_high = index + 1;

		trace_binder_alloc_page_end(alloc, index);
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		bool ret;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(&binder_alloc_lru, &page->lru);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
		continue;

err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(page->page_ptr);
		page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
		;
	}
err_no_vma:
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}
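
/*
 * Summary: binder_update_page_range() either backs the kernel range
 * [start, end) with pages that are also inserted into the owning task's
 * binder vma (allocate == 1), or parks the backing pages on
 * binder_alloc_lru so the shrinker can reclaim them later (allocate == 0).
 * It returns 0 on success, -ENOMEM when a page cannot be allocated or
 * mapped, and -ESRCH when the owning task's vma is already gone.
 */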
static struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
							 size_t data_size,
							 size_t offsets_size,
							 size_t extra_buffers_size,
							 int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size, data_offsets_size;
	int ret;

	if (alloc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid size %zd-%zd\n",
				   alloc->pid, data_size, offsets_size);
		return ERR_PTR(-EINVAL);
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid extra_buffers_size %zd\n",
				   alloc->pid, extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: binder_alloc_buf size %zd failed, no async space left\n",
				   alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}

	/* Pad 0-size buffers so they get assigned unique addresses */
	size = max(size, sizeof(void *));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		size_t allocated_buffers = 0;
		size_t largest_alloc_size = 0;
		size_t total_alloc_size = 0;
		size_t free_buffers = 0;
		size_t largest_free_size = 0;
		size_t total_free_size = 0;

		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			allocated_buffers++;
			total_alloc_size += buffer_size;
			if (buffer_size > largest_alloc_size)
				largest_alloc_size = buffer_size;
		}
		for (n = rb_first(&alloc->free_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			free_buffers++;
			total_free_size += buffer_size;
			if (buffer_size > largest_free_size)
				largest_free_size = buffer_size;
		}
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
		       alloc->pid, size);
		pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
		       total_alloc_size, allocated_buffers, largest_alloc_size,
		       total_free_size, free_buffers, largest_free_size);
		return ERR_PTR(-ENOSPC);
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
			   alloc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	WARN_ON(n && buffer_size != size);
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_update_page_range(alloc, 1,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
	if (ret)
		return ERR_PTR(ret);

	if (buffer_size != size) {
		struct binder_buffer *new_buffer;

		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!new_buffer) {
			pr_err("%s: %d failed to alloc new buffer struct\n",
			       __func__, alloc->pid);
			goto err_alloc_buf_struct_failed;
		}
		new_buffer->data = (u8 *)buffer->data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->allow_user_free = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got %pK\n",
			   alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_alloc_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}
	return buffer;

err_alloc_buf_struct_failed:
	binder_update_page_range(alloc, 0,
				 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
				 end_page_addr);
	return ERR_PTR(-ENOMEM);
}
/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to
 * pointer-sized boundary)
 *
 * Return:	The allocated buffer or an ERR_PTR() on failure
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async);
	mutex_unlock(&alloc->mutex);
	return buffer;
}
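
/*
 * Example of the rounding performed above: with 8-byte pointers, a request
 * of data_size = 10, offsets_size = 4 and extra_buffers_size = 0 yields
 * ALIGN(10, 8) + ALIGN(4, 8) + ALIGN(0, 8) = 16 + 8 + 0 = 24 bytes; for
 * async transactions, sizeof(struct binder_buffer) is additionally charged
 * against alloc->free_async_space.
 */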
static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer->data & PAGE_MASK);
}

static void *prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
}
static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	bool to_free = true;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
		to_free = false;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK share page with %pK\n",
				   alloc->pid, buffer->data, prev->data);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer)) {
			to_free = false;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%d: merge free, buffer %pK share page with %pK\n",
					   alloc->pid,
					   buffer->data,
					   next->data);
		}
	}

	if (PAGE_ALIGNED(buffer->data)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer start %pK is page aligned\n",
				   alloc->pid, buffer->data);
		to_free = false;
	}

	if (to_free) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
				   alloc->pid, buffer->data,
				   prev->data, next ? next->data : NULL);
		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
					 buffer_start_page(buffer) + PAGE_SIZE);
	}
	list_del(&buffer->entry);
	kfree(buffer);
}
static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
			   alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->data < alloc->buffer);
	BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += size + sizeof(struct binder_buffer);

		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_free_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}

	binder_update_page_range(alloc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}
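
/*
 * Freeing mirrors allocation: the pages backing the buffer are handed back
 * to binder_update_page_range(), the buffer moves from the allocated
 * rb-tree to the free rb-tree, and adjacent free neighbours are coalesced
 * so the free list does not fragment over time.
 */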
/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:	binder_alloc for this proc
 * @buffer:	kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}
/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:	alloc structure for this proc
 * @vma:	vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	const char *failure_string;
	struct binder_buffer *buffer;

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	alloc->buffer = area->addr;
	alloc->user_buffer_offset =
		vma->vm_start - (uintptr_t)alloc->buffer;
	mutex_unlock(&binder_alloc_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR(
				(vma->vm_start ^ (uint32_t)alloc->buffer))) {
			pr_info("binder_mmap: %d %lx-%lx maps %pK bad alignment\n",
				alloc->pid, vma->vm_start, vma->vm_end,
				alloc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
				   ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	alloc->buffer_size = vma->vm_end - vma->vm_start;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}

	buffer->data = alloc->buffer;
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;
	barrier();
	alloc->vma = vma;
	alloc->vma_vm_mm = vma->vm_mm;
	/* Same as mmgrab() in later kernel versions */
	atomic_inc(&alloc->vma_vm_mm->mm_count);

	return 0;

err_alloc_buf_struct_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_alloc_mmap_lock);
	vfree(alloc->buffer);
	alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
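
/*
 * After a successful mmap, the kernel area returned by get_vm_area() and
 * the user vma describe the same buffer, so corresponding addresses differ
 * by the constant alloc->user_buffer_offset computed above; free_async_space
 * reserves half of the mapping for asynchronous transactions.
 */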
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	BUG_ON(alloc->vma);

	buffers = 0;
	mutex_lock(&alloc->mutex);
	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);

		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kfree(buffer);
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			void *page_addr;
			bool on_lru;

			if (!alloc->pages[i].page_ptr)
				continue;

			on_lru = list_lru_del(&binder_alloc_lru,
					      &alloc->pages[i].lru);
			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%s: %d: page %d at %pK %s\n",
					   __func__, alloc->pid, i, page_addr,
					   on_lru ? "on lru" : "active");
			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
			__free_page(alloc->pages[i].page_ptr);
			page_count++;
		}
		kfree(alloc->pages);
		vfree(alloc->buffer);
	}
	mutex_unlock(&alloc->mutex);
	if (alloc->vma_vm_mm)
		mmdrop(alloc->vma_vm_mm);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
			   "%s: %d buffers %d, pages %d\n",
			   __func__, alloc->pid, buffers, page_count);
}
static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->extra_buffers_size,
		   buffer->transaction ? "active" : "delivered");
}
/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, "  buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}
/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc)
{
	struct binder_lru_page *page;
	int i;
	int active = 0;
	int lru = 0;
	int free = 0;

	mutex_lock(&alloc->mutex);
	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
		page = &alloc->pages[i];
		if (!page->page_ptr)
			free++;
		else if (list_empty(&page->lru))
			active++;
		else
			lru++;
	}
	mutex_unlock(&alloc->mutex);
	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
}
/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}
/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	WRITE_ONCE(alloc->vma, NULL);
}
/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock,
				       void *cb_arg)
{
	struct mm_struct *mm = NULL;
	struct binder_lru_page *page = container_of(item,
						    struct binder_lru_page,
						    lru);
	struct binder_alloc *alloc;
	uintptr_t page_addr;
	size_t index;
	struct vm_area_struct *vma;

	alloc = page->alloc;
	if (!mutex_trylock(&alloc->mutex))
		goto err_get_alloc_mutex_failed;

	if (!page->page_ptr)
		goto err_page_already_freed;

	index = page - alloc->pages;
	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
	vma = alloc->vma;
	if (vma) {
		mm = alloc->vma_vm_mm;
		if (!atomic_inc_not_zero(&mm->mm_users))
			goto err_mmget;
		if (!down_write_trylock(&mm->mmap_sem))
			goto err_down_write_mmap_sem_failed;
	}

	list_lru_isolate(lru, item);
	spin_unlock(lock);

	if (vma) {
		trace_binder_unmap_user_start(alloc, index);

		zap_page_range(vma,
			       page_addr + alloc->user_buffer_offset,
			       PAGE_SIZE);

		trace_binder_unmap_user_end(alloc, index);

		up_write(&mm->mmap_sem);
		mmput(mm);
	}

	trace_binder_unmap_kernel_start(alloc, index);

	unmap_kernel_range(page_addr, PAGE_SIZE);
	__free_page(page->page_ptr);
	page->page_ptr = NULL;

	trace_binder_unmap_kernel_end(alloc, index);

	spin_lock(lock);
	mutex_unlock(&alloc->mutex);
	return LRU_REMOVED_RETRY;

err_down_write_mmap_sem_failed:
	mmput_async(mm);
err_mmget:
err_page_already_freed:
	mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
	return LRU_SKIP;
}
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret = list_lru_count(&binder_alloc_lru);

	return ret;
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret;

	ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
			    NULL, sc->nr_to_scan);
	return ret;
}

static struct shrinker binder_shrinker = {
	.count_objects = binder_shrink_count,
	.scan_objects = binder_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};
/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->pid = current->group_leader->pid;
	mutex_init(&alloc->mutex);
	INIT_LIST_HEAD(&alloc->buffers);
}

int binder_alloc_shrinker_init(void)
{
	int ret = list_lru_init(&binder_alloc_lru);

	if (ret == 0) {
		ret = register_shrinker(&binder_shrinker);
		if (ret)
			list_lru_destroy(&binder_alloc_lru);
	}
	return ret;
}