/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
17 | ||
18 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
19 | ||
20 | #include <asm/cacheflush.h> | |
21 | #include <linux/list.h> | |
22 | #include <linux/mm.h> | |
23 | #include <linux/module.h> | |
24 | #include <linux/rtmutex.h> | |
25 | #include <linux/rbtree.h> | |
26 | #include <linux/seq_file.h> | |
27 | #include <linux/vmalloc.h> | |
28 | #include <linux/slab.h> | |
29 | #include <linux/sched.h> | |
f73e8e76 | 30 | #include <linux/list_lru.h> |
2324f70c TK |
31 | #include "binder_alloc.h" |
32 | #include "binder_trace.h" | |
33 | ||
f73e8e76 SY |
34 | struct list_lru binder_alloc_lru; |
35 | ||
2324f70c TK |
36 | static DEFINE_MUTEX(binder_alloc_mmap_lock); |
37 | ||
38 | enum { | |
39 | BINDER_DEBUG_OPEN_CLOSE = 1U << 1, | |
40 | BINDER_DEBUG_BUFFER_ALLOC = 1U << 2, | |
41 | BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3, | |
42 | }; | |
43 | static uint32_t binder_alloc_debug_mask; | |
44 | ||
45 | module_param_named(debug_mask, binder_alloc_debug_mask, | |
46 | uint, S_IWUSR | S_IRUGO); | |
47 | ||
48 | #define binder_alloc_debug(mask, x...) \ | |
49 | do { \ | |
50 | if (binder_alloc_debug_mask & mask) \ | |
51 | pr_info(x); \ | |
52 | } while (0) | |
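
/*
 * Usage note (an illustrative sketch, not driver code): debug_mask is
 * writable at runtime through the module parameter created above, so buffer
 * allocation tracing (BINDER_DEBUG_BUFFER_ALLOC, 1U << 2 == 4) can be
 * enabled with something like:
 *
 *	echo 4 > /sys/module/binder_alloc/parameters/debug_mask
 *
 * (The exact sysfs path depends on how this file is built; the path above
 * assumes binder_alloc is compiled as its own object.)
 */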
53 | ||
0e05bd2d SY |
54 | static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer) |
55 | { | |
56 | return list_entry(buffer->entry.next, struct binder_buffer, entry); | |
57 | } | |
58 | ||
59 | static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer) | |
60 | { | |
61 | return list_entry(buffer->entry.prev, struct binder_buffer, entry); | |
62 | } | |
63 | ||
2324f70c TK |
64 | static size_t binder_alloc_buffer_size(struct binder_alloc *alloc, |
65 | struct binder_buffer *buffer) | |
66 | { | |
67 | if (list_is_last(&buffer->entry, &alloc->buffers)) | |
7a6d4b15 SY |
68 | return (u8 *)alloc->buffer + |
69 | alloc->buffer_size - (u8 *)buffer->data; | |
70 | return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data; | |
2324f70c TK |
71 | } |
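
/*
 * Note on the size scheme above: a buffer's size is never stored directly.
 * It is the distance from its data pointer to the next buffer's data
 * pointer, or to the end of the mapped area for the last buffer. For
 * example (addresses illustrative only): with alloc->buffer at 0x1000 and
 * buffer_size 0x4000, two buffers whose data starts at 0x1000 and 0x1800
 * have sizes 0x800 and 0x3800 respectively.
 */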
72 | ||
73 | static void binder_insert_free_buffer(struct binder_alloc *alloc, | |
74 | struct binder_buffer *new_buffer) | |
75 | { | |
76 | struct rb_node **p = &alloc->free_buffers.rb_node; | |
77 | struct rb_node *parent = NULL; | |
78 | struct binder_buffer *buffer; | |
79 | size_t buffer_size; | |
80 | size_t new_buffer_size; | |
81 | ||
82 | BUG_ON(!new_buffer->free); | |
83 | ||
84 | new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer); | |
85 | ||
86 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, | |
87 | "%d: add free buffer, size %zd, at %pK\n", | |
88 | alloc->pid, new_buffer_size, new_buffer); | |
89 | ||
90 | while (*p) { | |
91 | parent = *p; | |
92 | buffer = rb_entry(parent, struct binder_buffer, rb_node); | |
93 | BUG_ON(!buffer->free); | |
94 | ||
95 | buffer_size = binder_alloc_buffer_size(alloc, buffer); | |
96 | ||
97 | if (new_buffer_size < buffer_size) | |
98 | p = &parent->rb_left; | |
99 | else | |
100 | p = &parent->rb_right; | |
101 | } | |
102 | rb_link_node(&new_buffer->rb_node, parent, p); | |
103 | rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers); | |
104 | } | |
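
/*
 * The two rb-trees serve different lookups: free_buffers is keyed by size
 * (so binder_alloc_new_buf_locked() can do a best-fit search), while
 * allocated_buffers below is keyed by address (so a user pointer can be
 * validated and mapped back to its buffer on free).
 */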
105 | ||
106 | static void binder_insert_allocated_buffer_locked( | |
107 | struct binder_alloc *alloc, struct binder_buffer *new_buffer) | |
108 | { | |
109 | struct rb_node **p = &alloc->allocated_buffers.rb_node; | |
110 | struct rb_node *parent = NULL; | |
111 | struct binder_buffer *buffer; | |
112 | ||
113 | BUG_ON(new_buffer->free); | |
114 | ||
115 | while (*p) { | |
116 | parent = *p; | |
117 | buffer = rb_entry(parent, struct binder_buffer, rb_node); | |
118 | BUG_ON(buffer->free); | |
119 | ||
7a6d4b15 | 120 | if (new_buffer->data < buffer->data) |
2324f70c | 121 | p = &parent->rb_left; |
7a6d4b15 | 122 | else if (new_buffer->data > buffer->data) |
2324f70c TK |
123 | p = &parent->rb_right; |
124 | else | |
125 | BUG(); | |
126 | } | |
127 | rb_link_node(&new_buffer->rb_node, parent, p); | |
128 | rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers); | |
129 | } | |
130 | ||
db516584 | 131 | static struct binder_buffer *binder_alloc_prepare_to_free_locked( |
2324f70c TK |
132 | struct binder_alloc *alloc, |
133 | uintptr_t user_ptr) | |
134 | { | |
135 | struct rb_node *n = alloc->allocated_buffers.rb_node; | |
136 | struct binder_buffer *buffer; | |
7a6d4b15 | 137 | void *kern_ptr; |
2324f70c | 138 | |
7a6d4b15 | 139 | kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset); |
2324f70c TK |
140 | |
141 | while (n) { | |
142 | buffer = rb_entry(n, struct binder_buffer, rb_node); | |
143 | BUG_ON(buffer->free); | |
144 | ||
7a6d4b15 | 145 | if (kern_ptr < buffer->data) |
2324f70c | 146 | n = n->rb_left; |
7a6d4b15 | 147 | else if (kern_ptr > buffer->data) |
2324f70c | 148 | n = n->rb_right; |
db516584 TK |
149 | else { |
150 | /* | |
151 | * Guard against user threads attempting to | |
1cac41cb MB |
152 | * free the buffer when in use by kernel or |
153 | * after it's already been freed. | |
db516584 | 154 | */ |
1cac41cb MB |
155 | if (!buffer->allow_user_free) |
156 | return ERR_PTR(-EPERM); | |
157 | buffer->allow_user_free = 0; | |
2324f70c | 158 | return buffer; |
db516584 | 159 | } |
2324f70c TK |
160 | } |
161 | return NULL; | |
162 | } | |
163 | ||
164 | /** | |
165 | * binder_alloc_buffer_lookup() - get buffer given user ptr | |
166 | * @alloc: binder_alloc for this proc | |
167 | * @user_ptr: User pointer to buffer data | |
168 | * | |
169 | * Validate userspace pointer to buffer data and return buffer corresponding to | |
170 | * that user pointer. Search the rb tree for buffer that matches user data | |
171 | * pointer. | |
172 | * | |
173 | * Return: Pointer to buffer or NULL | |
174 | */ | |
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}
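
/*
 * A minimal caller sketch (hypothetical, mirroring how binder.c handles
 * BC_FREE_BUFFER): look the buffer up from the user pointer, then release
 * it once any kernel references are dropped:
 *
 *	buffer = binder_alloc_prepare_to_free(alloc, data_ptr);
 *	if (IS_ERR_OR_NULL(buffer))
 *		return;		// invalid pointer or double free
 *	...
 *	binder_alloc_free_buf(alloc, buffer);
 */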
185 | ||
186 | static int binder_update_page_range(struct binder_alloc *alloc, int allocate, | |
e6fa28a9 | 187 | void *start, void *end) |
2324f70c TK |
188 | { |
189 | void *page_addr; | |
190 | unsigned long user_page_addr; | |
f73e8e76 | 191 | struct binder_lru_page *page; |
e6fa28a9 | 192 | struct vm_area_struct *vma = NULL; |
f73e8e76 SY |
193 | struct mm_struct *mm = NULL; |
194 | bool need_mm = false; | |
2324f70c TK |
195 | |
196 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, | |
197 | "%d: %s pages %pK-%pK\n", alloc->pid, | |
198 | allocate ? "allocate" : "free", start, end); | |
199 | ||
200 | if (end <= start) | |
201 | return 0; | |
202 | ||
203 | trace_binder_update_page_range(alloc, allocate, start, end); | |
204 | ||
f73e8e76 SY |
205 | if (allocate == 0) |
206 | goto free_range; | |
207 | ||
208 | for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { | |
209 | page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE]; | |
210 | if (!page->page_ptr) { | |
211 | need_mm = true; | |
212 | break; | |
213 | } | |
214 | } | |
215 | ||
9b9d7cf1 SY |
216 | /* Same as mmget_not_zero() in later kernel versions */ |
217 | if (need_mm && atomic_inc_not_zero(&alloc->vma_vm_mm->mm_users)) | |
218 | mm = alloc->vma_vm_mm; | |
2324f70c TK |
219 | |
220 | if (mm) { | |
5a068558 | 221 | down_read(&mm->mmap_sem); |
2324f70c | 222 | vma = alloc->vma; |
2324f70c TK |
223 | } |
224 | ||
f73e8e76 | 225 | if (!vma && need_mm) { |
2324f70c TK |
226 | pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n", |
227 | alloc->pid); | |
228 | goto err_no_vma; | |
229 | } | |
230 | ||
231 | for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { | |
232 | int ret; | |
f73e8e76 | 233 | bool on_lru; |
850d57dc | 234 | size_t index; |
2324f70c | 235 | |
850d57dc SY |
236 | index = (page_addr - alloc->buffer) / PAGE_SIZE; |
237 | page = &alloc->pages[index]; | |
2324f70c | 238 | |
f73e8e76 | 239 | if (page->page_ptr) { |
850d57dc SY |
240 | trace_binder_alloc_lru_start(alloc, index); |
241 | ||
f73e8e76 SY |
242 | on_lru = list_lru_del(&binder_alloc_lru, &page->lru); |
243 | WARN_ON(!on_lru); | |
850d57dc SY |
244 | |
245 | trace_binder_alloc_lru_end(alloc, index); | |
f73e8e76 SY |
246 | continue; |
247 | } | |
248 | ||
249 | if (WARN_ON(!vma)) | |
250 | goto err_page_ptr_cleared; | |
251 | ||
850d57dc | 252 | trace_binder_alloc_page_start(alloc, index); |
f73e8e76 SY |
253 | page->page_ptr = alloc_page(GFP_KERNEL | |
254 | __GFP_HIGHMEM | | |
255 | __GFP_ZERO); | |
256 | if (!page->page_ptr) { | |
2324f70c TK |
257 | pr_err("%d: binder_alloc_buf failed for page at %pK\n", |
258 | alloc->pid, page_addr); | |
259 | goto err_alloc_page_failed; | |
260 | } | |
f73e8e76 SY |
261 | page->alloc = alloc; |
262 | INIT_LIST_HEAD(&page->lru); | |
263 | ||
2324f70c | 264 | ret = map_kernel_range_noflush((unsigned long)page_addr, |
f73e8e76 SY |
265 | PAGE_SIZE, PAGE_KERNEL, |
266 | &page->page_ptr); | |
2324f70c TK |
267 | flush_cache_vmap((unsigned long)page_addr, |
268 | (unsigned long)page_addr + PAGE_SIZE); | |
269 | if (ret != 1) { | |
270 | pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n", | |
271 | alloc->pid, page_addr); | |
272 | goto err_map_kernel_failed; | |
273 | } | |
274 | user_page_addr = | |
275 | (uintptr_t)page_addr + alloc->user_buffer_offset; | |
f73e8e76 | 276 | ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr); |
2324f70c TK |
277 | if (ret) { |
278 | pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n", | |
279 | alloc->pid, user_page_addr); | |
280 | goto err_vm_insert_page_failed; | |
281 | } | |
850d57dc | 282 | |
c8bc3e3a MC |
283 | if (index + 1 > alloc->pages_high) |
284 | alloc->pages_high = index + 1; | |
285 | ||
850d57dc | 286 | trace_binder_alloc_page_end(alloc, index); |
2324f70c TK |
287 | /* vm_insert_page does not seem to increment the refcount */ |
288 | } | |
289 | if (mm) { | |
5a068558 | 290 | up_read(&mm->mmap_sem); |
2324f70c TK |
291 | mmput(mm); |
292 | } | |
293 | return 0; | |
294 | ||
295 | free_range: | |
296 | for (page_addr = end - PAGE_SIZE; page_addr >= start; | |
297 | page_addr -= PAGE_SIZE) { | |
f73e8e76 | 298 | bool ret; |
850d57dc | 299 | size_t index; |
f73e8e76 | 300 | |
850d57dc SY |
301 | index = (page_addr - alloc->buffer) / PAGE_SIZE; |
302 | page = &alloc->pages[index]; | |
303 | ||
304 | trace_binder_free_lru_start(alloc, index); | |
f73e8e76 SY |
305 | |
306 | ret = list_lru_add(&binder_alloc_lru, &page->lru); | |
307 | WARN_ON(!ret); | |
850d57dc SY |
308 | |
309 | trace_binder_free_lru_end(alloc, index); | |
f73e8e76 SY |
310 | continue; |
311 | ||
2324f70c TK |
312 | err_vm_insert_page_failed: |
313 | unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); | |
314 | err_map_kernel_failed: | |
f73e8e76 SY |
315 | __free_page(page->page_ptr); |
316 | page->page_ptr = NULL; | |
2324f70c | 317 | err_alloc_page_failed: |
f73e8e76 | 318 | err_page_ptr_cleared: |
2324f70c TK |
319 | ; |
320 | } | |
321 | err_no_vma: | |
322 | if (mm) { | |
5a068558 | 323 | up_read(&mm->mmap_sem); |
2324f70c TK |
324 | mmput(mm); |
325 | } | |
0a0fdc1f | 326 | return vma ? -ENOMEM : -ESRCH; |
2324f70c TK |
327 | } |
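
/*
 * In other words: with allocate == 1 the range [start, end) is backed page
 * by page, pulling still-resident pages back off the LRU instead of
 * reallocating them; with allocate == 0 the pages are not freed
 * immediately but parked on binder_alloc_lru so the shrinker can reclaim
 * them lazily. A hypothetical caller backing a single page would look
 * like:
 *
 *	ret = binder_update_page_range(alloc, 1, addr, addr + PAGE_SIZE);
 *	...
 *	binder_update_page_range(alloc, 0, addr, addr + PAGE_SIZE);
 */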
328 | ||
329 | struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc, | |
330 | size_t data_size, | |
331 | size_t offsets_size, | |
332 | size_t extra_buffers_size, | |
333 | int is_async) | |
334 | { | |
335 | struct rb_node *n = alloc->free_buffers.rb_node; | |
336 | struct binder_buffer *buffer; | |
337 | size_t buffer_size; | |
338 | struct rb_node *best_fit = NULL; | |
339 | void *has_page_addr; | |
340 | void *end_page_addr; | |
341 | size_t size, data_offsets_size; | |
0a0fdc1f | 342 | int ret; |
2324f70c TK |
343 | |
344 | if (alloc->vma == NULL) { | |
345 | pr_err("%d: binder_alloc_buf, no vma\n", | |
346 | alloc->pid); | |
0a0fdc1f | 347 | return ERR_PTR(-ESRCH); |
2324f70c TK |
348 | } |
349 | ||
350 | data_offsets_size = ALIGN(data_size, sizeof(void *)) + | |
351 | ALIGN(offsets_size, sizeof(void *)); | |
352 | ||
353 | if (data_offsets_size < data_size || data_offsets_size < offsets_size) { | |
354 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, | |
355 | "%d: got transaction with invalid size %zd-%zd\n", | |
356 | alloc->pid, data_size, offsets_size); | |
0a0fdc1f | 357 | return ERR_PTR(-EINVAL); |
2324f70c TK |
358 | } |
359 | size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *)); | |
360 | if (size < data_offsets_size || size < extra_buffers_size) { | |
361 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, | |
362 | "%d: got transaction with invalid extra_buffers_size %zd\n", | |
363 | alloc->pid, extra_buffers_size); | |
0a0fdc1f | 364 | return ERR_PTR(-EINVAL); |
2324f70c TK |
365 | } |
366 | if (is_async && | |
367 | alloc->free_async_space < size + sizeof(struct binder_buffer)) { | |
368 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, | |
369 | "%d: binder_alloc_buf size %zd failed, no async space left\n", | |
370 | alloc->pid, size); | |
0a0fdc1f | 371 | return ERR_PTR(-ENOSPC); |
2324f70c TK |
372 | } |
373 | ||
7a6d4b15 SY |
374 | /* Pad 0-size buffers so they get assigned unique addresses */ |
375 | size = max(size, sizeof(void *)); | |
376 | ||
2324f70c TK |
377 | while (n) { |
378 | buffer = rb_entry(n, struct binder_buffer, rb_node); | |
379 | BUG_ON(!buffer->free); | |
380 | buffer_size = binder_alloc_buffer_size(alloc, buffer); | |
381 | ||
382 | if (size < buffer_size) { | |
383 | best_fit = n; | |
384 | n = n->rb_left; | |
385 | } else if (size > buffer_size) | |
386 | n = n->rb_right; | |
387 | else { | |
388 | best_fit = n; | |
389 | break; | |
390 | } | |
391 | } | |
392 | if (best_fit == NULL) { | |
42e1ca78 MC |
393 | size_t allocated_buffers = 0; |
394 | size_t largest_alloc_size = 0; | |
395 | size_t total_alloc_size = 0; | |
396 | size_t free_buffers = 0; | |
397 | size_t largest_free_size = 0; | |
398 | size_t total_free_size = 0; | |
399 | ||
400 | for (n = rb_first(&alloc->allocated_buffers); n != NULL; | |
401 | n = rb_next(n)) { | |
402 | buffer = rb_entry(n, struct binder_buffer, rb_node); | |
403 | buffer_size = binder_alloc_buffer_size(alloc, buffer); | |
404 | allocated_buffers++; | |
405 | total_alloc_size += buffer_size; | |
406 | if (buffer_size > largest_alloc_size) | |
407 | largest_alloc_size = buffer_size; | |
408 | } | |
409 | for (n = rb_first(&alloc->free_buffers); n != NULL; | |
410 | n = rb_next(n)) { | |
411 | buffer = rb_entry(n, struct binder_buffer, rb_node); | |
412 | buffer_size = binder_alloc_buffer_size(alloc, buffer); | |
413 | free_buffers++; | |
414 | total_free_size += buffer_size; | |
415 | if (buffer_size > largest_free_size) | |
416 | largest_free_size = buffer_size; | |
417 | } | |
2324f70c TK |
418 | pr_err("%d: binder_alloc_buf size %zd failed, no address space\n", |
419 | alloc->pid, size); | |
42e1ca78 MC |
420 | pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n", |
421 | total_alloc_size, allocated_buffers, largest_alloc_size, | |
422 | total_free_size, free_buffers, largest_free_size); | |
0a0fdc1f | 423 | return ERR_PTR(-ENOSPC); |
2324f70c TK |
424 | } |
425 | if (n == NULL) { | |
426 | buffer = rb_entry(best_fit, struct binder_buffer, rb_node); | |
427 | buffer_size = binder_alloc_buffer_size(alloc, buffer); | |
428 | } | |
429 | ||
430 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, | |
431 | "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n", | |
432 | alloc->pid, size, buffer, buffer_size); | |
433 | ||
434 | has_page_addr = | |
435 | (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); | |
7a6d4b15 | 436 | WARN_ON(n && buffer_size != size); |
2324f70c | 437 | end_page_addr = |
7a6d4b15 | 438 | (void *)PAGE_ALIGN((uintptr_t)buffer->data + size); |
2324f70c TK |
439 | if (end_page_addr > has_page_addr) |
440 | end_page_addr = has_page_addr; | |
0a0fdc1f | 441 | ret = binder_update_page_range(alloc, 1, |
e6fa28a9 | 442 | (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr); |
0a0fdc1f TK |
443 | if (ret) |
444 | return ERR_PTR(ret); | |
2324f70c | 445 | |
2324f70c | 446 | if (buffer_size != size) { |
7a6d4b15 | 447 | struct binder_buffer *new_buffer; |
2324f70c | 448 | |
7a6d4b15 SY |
449 | new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); |
450 | if (!new_buffer) { | |
451 | pr_err("%s: %d failed to alloc new buffer struct\n", | |
452 | __func__, alloc->pid); | |
453 | goto err_alloc_buf_struct_failed; | |
454 | } | |
455 | new_buffer->data = (u8 *)buffer->data + size; | |
2324f70c TK |
456 | list_add(&new_buffer->entry, &buffer->entry); |
457 | new_buffer->free = 1; | |
458 | binder_insert_free_buffer(alloc, new_buffer); | |
459 | } | |
7a6d4b15 SY |
460 | |
461 | rb_erase(best_fit, &alloc->free_buffers); | |
462 | buffer->free = 0; | |
1cac41cb | 463 | buffer->allow_user_free = 0; |
7a6d4b15 | 464 | binder_insert_allocated_buffer_locked(alloc, buffer); |
2324f70c TK |
465 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, |
466 | "%d: binder_alloc_buf size %zd got %pK\n", | |
467 | alloc->pid, size, buffer); | |
468 | buffer->data_size = data_size; | |
469 | buffer->offsets_size = offsets_size; | |
470 | buffer->async_transaction = is_async; | |
471 | buffer->extra_buffers_size = extra_buffers_size; | |
472 | if (is_async) { | |
473 | alloc->free_async_space -= size + sizeof(struct binder_buffer); | |
474 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, | |
475 | "%d: binder_alloc_buf size %zd async free %zd\n", | |
476 | alloc->pid, size, alloc->free_async_space); | |
477 | } | |
478 | return buffer; | |
7a6d4b15 SY |
479 | |
480 | err_alloc_buf_struct_failed: | |
481 | binder_update_page_range(alloc, 0, | |
482 | (void *)PAGE_ALIGN((uintptr_t)buffer->data), | |
e6fa28a9 | 483 | end_page_addr); |
7a6d4b15 | 484 | return ERR_PTR(-ENOMEM); |
2324f70c TK |
485 | } |
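
/*
 * Allocation strategy recap: the free tree is walked for the smallest free
 * buffer that still fits (best fit); if that buffer is larger than the
 * request, its tail is split off into a new free buffer. For example, a
 * 512-byte request satisfied from a 4 KiB free buffer leaves a 3.5 KiB
 * remainder in the free tree (sizes illustrative, after pointer-size
 * alignment).
 */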
486 | ||
487 | /** | |
488 | * binder_alloc_new_buf() - Allocate a new binder buffer | |
489 | * @alloc: binder_alloc for this proc | |
490 | * @data_size: size of user data buffer | |
491 | * @offsets_size: user specified buffer offset | |
492 | * @extra_buffers_size: size of extra space for meta-data (eg, security context) | |
493 | * @is_async: buffer for async transaction | |
494 | * | |
495 | * Allocate a new buffer given the requested sizes. Returns | |
496 | * the kernel version of the buffer pointer. The size allocated | |
497 | * is the sum of the three given sizes (each rounded up to | |
498 | * pointer-sized boundary) | |
499 | * | |
500 | * Return: The allocated buffer or %NULL if error | |
501 | */ | |
502 | struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, | |
503 | size_t data_size, | |
504 | size_t offsets_size, | |
505 | size_t extra_buffers_size, | |
506 | int is_async) | |
507 | { | |
508 | struct binder_buffer *buffer; | |
509 | ||
510 | mutex_lock(&alloc->mutex); | |
511 | buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size, | |
512 | extra_buffers_size, is_async); | |
513 | mutex_unlock(&alloc->mutex); | |
514 | return buffer; | |
515 | } | |
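
/*
 * A minimal usage sketch (hypothetical; in binder.c the sizes come from
 * the binder_transaction_data being sent):
 *
 *	buffer = binder_alloc_new_buf(alloc, tr->data_size,
 *				      tr->offsets_size, 0,
 *				      !!(tr->flags & TF_ONE_WAY));
 *	if (IS_ERR(buffer))
 *		return PTR_ERR(buffer);
 */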
516 | ||
517 | static void *buffer_start_page(struct binder_buffer *buffer) | |
518 | { | |
7a6d4b15 | 519 | return (void *)((uintptr_t)buffer->data & PAGE_MASK); |
2324f70c TK |
520 | } |
521 | ||
7a6d4b15 | 522 | static void *prev_buffer_end_page(struct binder_buffer *buffer) |
2324f70c | 523 | { |
7a6d4b15 | 524 | return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK); |
2324f70c TK |
525 | } |
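
/*
 * Worked example with 4 KiB pages (addresses illustrative): a buffer whose
 * data starts at 0x3008 has buffer_start_page() == 0x3000 and
 * prev_buffer_end_page() == 0x3000 (0x3007 masked); if the data started
 * page-aligned at 0x3000, the latter would be 0x2000 -- the page holding
 * the byte just before this buffer's data, i.e. the last page used by
 * whatever precedes it.
 */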
526 | ||
527 | static void binder_delete_free_buffer(struct binder_alloc *alloc, | |
528 | struct binder_buffer *buffer) | |
529 | { | |
530 | struct binder_buffer *prev, *next = NULL; | |
7a6d4b15 | 531 | bool to_free = true; |
2324f70c | 532 | BUG_ON(alloc->buffers.next == &buffer->entry); |
0e05bd2d | 533 | prev = binder_buffer_prev(buffer); |
2324f70c | 534 | BUG_ON(!prev->free); |
7a6d4b15 SY |
535 | if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) { |
536 | to_free = false; | |
2324f70c | 537 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, |
7a6d4b15 SY |
538 | "%d: merge free, buffer %pK share page with %pK\n", |
539 | alloc->pid, buffer->data, prev->data); | |
2324f70c TK |
540 | } |
541 | ||
542 | if (!list_is_last(&buffer->entry, &alloc->buffers)) { | |
0e05bd2d | 543 | next = binder_buffer_next(buffer); |
7a6d4b15 SY |
544 | if (buffer_start_page(next) == buffer_start_page(buffer)) { |
545 | to_free = false; | |
2324f70c | 546 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, |
7a6d4b15 SY |
547 | "%d: merge free, buffer %pK share page with %pK\n", |
548 | alloc->pid, | |
549 | buffer->data, | |
550 | next->data); | |
2324f70c TK |
551 | } |
552 | } | |
7a6d4b15 SY |
553 | |
554 | if (PAGE_ALIGNED(buffer->data)) { | |
555 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, | |
556 | "%d: merge free, buffer start %pK is page aligned\n", | |
557 | alloc->pid, buffer->data); | |
558 | to_free = false; | |
559 | } | |
560 | ||
561 | if (to_free) { | |
2324f70c | 562 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, |
7a6d4b15 SY |
563 | "%d: merge free, buffer %pK do not share page with %pK or %pK\n", |
564 | alloc->pid, buffer->data, | |
a7e1d33c | 565 | prev->data, next ? next->data : NULL); |
7a6d4b15 | 566 | binder_update_page_range(alloc, 0, buffer_start_page(buffer), |
e6fa28a9 | 567 | buffer_start_page(buffer) + PAGE_SIZE); |
2324f70c | 568 | } |
7a6d4b15 SY |
569 | list_del(&buffer->entry); |
570 | kfree(buffer); | |
2324f70c TK |
571 | } |
572 | ||
573 | static void binder_free_buf_locked(struct binder_alloc *alloc, | |
574 | struct binder_buffer *buffer) | |
575 | { | |
576 | size_t size, buffer_size; | |
577 | ||
578 | buffer_size = binder_alloc_buffer_size(alloc, buffer); | |
579 | ||
580 | size = ALIGN(buffer->data_size, sizeof(void *)) + | |
581 | ALIGN(buffer->offsets_size, sizeof(void *)) + | |
582 | ALIGN(buffer->extra_buffers_size, sizeof(void *)); | |
583 | ||
584 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, | |
585 | "%d: binder_free_buf %pK size %zd buffer_size %zd\n", | |
586 | alloc->pid, buffer, size, buffer_size); | |
587 | ||
588 | BUG_ON(buffer->free); | |
589 | BUG_ON(size > buffer_size); | |
590 | BUG_ON(buffer->transaction != NULL); | |
7a6d4b15 SY |
591 | BUG_ON(buffer->data < alloc->buffer); |
592 | BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size); | |
2324f70c TK |
593 | |
594 | if (buffer->async_transaction) { | |
595 | alloc->free_async_space += size + sizeof(struct binder_buffer); | |
596 | ||
597 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, | |
598 | "%d: binder_free_buf size %zd async free %zd\n", | |
599 | alloc->pid, size, alloc->free_async_space); | |
600 | } | |
601 | ||
602 | binder_update_page_range(alloc, 0, | |
603 | (void *)PAGE_ALIGN((uintptr_t)buffer->data), | |
e6fa28a9 | 604 | (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK)); |
2324f70c TK |
605 | |
606 | rb_erase(&buffer->rb_node, &alloc->allocated_buffers); | |
607 | buffer->free = 1; | |
608 | if (!list_is_last(&buffer->entry, &alloc->buffers)) { | |
0e05bd2d | 609 | struct binder_buffer *next = binder_buffer_next(buffer); |
2324f70c TK |
610 | |
611 | if (next->free) { | |
612 | rb_erase(&next->rb_node, &alloc->free_buffers); | |
613 | binder_delete_free_buffer(alloc, next); | |
614 | } | |
615 | } | |
616 | if (alloc->buffers.next != &buffer->entry) { | |
0e05bd2d | 617 | struct binder_buffer *prev = binder_buffer_prev(buffer); |
2324f70c TK |
618 | |
619 | if (prev->free) { | |
620 | binder_delete_free_buffer(alloc, buffer); | |
621 | rb_erase(&prev->rb_node, &alloc->free_buffers); | |
622 | buffer = prev; | |
623 | } | |
624 | } | |
625 | binder_insert_free_buffer(alloc, buffer); | |
626 | } | |
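
/*
 * Note the coalescing order above: pages fully covered by the freed buffer
 * are returned first, then a free successor and/or predecessor is merged
 * in via binder_delete_free_buffer(), which also drops the page at the
 * merge boundary once no neighbouring buffer shares it. The merged buffer
 * is finally reinserted into the size-ordered free tree.
 */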
627 | ||
628 | /** | |
629 | * binder_alloc_free_buf() - free a binder buffer | |
630 | * @alloc: binder_alloc for this proc | |
631 | * @buffer: kernel pointer to buffer | |
632 | * | |
633 | * Free the buffer allocated via binder_alloc_new_buffer() | |
634 | */ | |
635 | void binder_alloc_free_buf(struct binder_alloc *alloc, | |
636 | struct binder_buffer *buffer) | |
637 | { | |
638 | mutex_lock(&alloc->mutex); | |
639 | binder_free_buf_locked(alloc, buffer); | |
640 | mutex_unlock(&alloc->mutex); | |
641 | } | |
642 | ||
643 | /** | |
644 | * binder_alloc_mmap_handler() - map virtual address space for proc | |
645 | * @alloc: alloc structure for this proc | |
646 | * @vma: vma passed to mmap() | |
647 | * | |
648 | * Called by binder_mmap() to initialize the space specified in | |
649 | * vma for allocating binder buffers | |
650 | * | |
651 | * Return: | |
652 | * 0 = success | |
653 | * -EBUSY = address space already mapped | |
654 | * -ENOMEM = failed to map memory to given address space | |
655 | */ | |
656 | int binder_alloc_mmap_handler(struct binder_alloc *alloc, | |
657 | struct vm_area_struct *vma) | |
658 | { | |
659 | int ret; | |
660 | struct vm_struct *area; | |
661 | const char *failure_string; | |
662 | struct binder_buffer *buffer; | |
663 | ||
664 | mutex_lock(&binder_alloc_mmap_lock); | |
665 | if (alloc->buffer) { | |
666 | ret = -EBUSY; | |
667 | failure_string = "already mapped"; | |
668 | goto err_already_mapped; | |
669 | } | |
670 | ||
671 | area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); | |
672 | if (area == NULL) { | |
673 | ret = -ENOMEM; | |
674 | failure_string = "get_vm_area"; | |
675 | goto err_get_vm_area_failed; | |
676 | } | |
677 | alloc->buffer = area->addr; | |
678 | alloc->user_buffer_offset = | |
679 | vma->vm_start - (uintptr_t)alloc->buffer; | |
680 | mutex_unlock(&binder_alloc_mmap_lock); | |
681 | ||
682 | #ifdef CONFIG_CPU_CACHE_VIPT | |
683 | if (cache_is_vipt_aliasing()) { | |
684 | while (CACHE_COLOUR( | |
685 | (vma->vm_start ^ (uint32_t)alloc->buffer))) { | |
686 | pr_info("binder_mmap: %d %lx-%lx maps %pK bad alignment\n", | |
687 | alloc->pid, vma->vm_start, vma->vm_end, | |
688 | alloc->buffer); | |
689 | vma->vm_start += PAGE_SIZE; | |
690 | } | |
691 | } | |
692 | #endif | |
693 | alloc->pages = kzalloc(sizeof(alloc->pages[0]) * | |
694 | ((vma->vm_end - vma->vm_start) / PAGE_SIZE), | |
695 | GFP_KERNEL); | |
696 | if (alloc->pages == NULL) { | |
697 | ret = -ENOMEM; | |
698 | failure_string = "alloc page array"; | |
699 | goto err_alloc_pages_failed; | |
700 | } | |
701 | alloc->buffer_size = vma->vm_end - vma->vm_start; | |
702 | ||
7a6d4b15 SY |
703 | buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); |
704 | if (!buffer) { | |
2324f70c | 705 | ret = -ENOMEM; |
7a6d4b15 SY |
706 | failure_string = "alloc buffer struct"; |
707 | goto err_alloc_buf_struct_failed; | |
2324f70c | 708 | } |
7a6d4b15 SY |
709 | |
710 | buffer->data = alloc->buffer; | |
2324f70c TK |
711 | list_add(&buffer->entry, &alloc->buffers); |
712 | buffer->free = 1; | |
713 | binder_insert_free_buffer(alloc, buffer); | |
714 | alloc->free_async_space = alloc->buffer_size / 2; | |
715 | barrier(); | |
716 | alloc->vma = vma; | |
717 | alloc->vma_vm_mm = vma->vm_mm; | |
9b9d7cf1 SY |
718 | /* Same as mmgrab() in later kernel versions */ |
719 | atomic_inc(&alloc->vma_vm_mm->mm_count); | |
2324f70c TK |
720 | |
721 | return 0; | |
722 | ||
7a6d4b15 | 723 | err_alloc_buf_struct_failed: |
2324f70c TK |
724 | kfree(alloc->pages); |
725 | alloc->pages = NULL; | |
726 | err_alloc_pages_failed: | |
727 | mutex_lock(&binder_alloc_mmap_lock); | |
728 | vfree(alloc->buffer); | |
729 | alloc->buffer = NULL; | |
730 | err_get_vm_area_failed: | |
731 | err_already_mapped: | |
732 | mutex_unlock(&binder_alloc_mmap_lock); | |
733 | pr_err("%s: %d %lx-%lx %s failed %d\n", __func__, | |
734 | alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret); | |
735 | return ret; | |
736 | } | |
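
/*
 * The key invariant established here: the kernel vmalloc-space area and
 * the userspace mapping cover the same physical pages at a constant
 * user_buffer_offset from each other, so either side's address can be
 * derived from the other with a single addition or subtraction, e.g.
 * (sketch):
 *
 *	user_addr = (uintptr_t)kern_addr + alloc->user_buffer_offset;
 */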
737 | ||
738 | ||
739 | void binder_alloc_deferred_release(struct binder_alloc *alloc) | |
740 | { | |
741 | struct rb_node *n; | |
742 | int buffers, page_count; | |
7a6d4b15 | 743 | struct binder_buffer *buffer; |
2324f70c TK |
744 | |
745 | BUG_ON(alloc->vma); | |
746 | ||
747 | buffers = 0; | |
748 | mutex_lock(&alloc->mutex); | |
749 | while ((n = rb_first(&alloc->allocated_buffers))) { | |
2324f70c TK |
750 | buffer = rb_entry(n, struct binder_buffer, rb_node); |
751 | ||
752 | /* Transaction should already have been freed */ | |
753 | BUG_ON(buffer->transaction); | |
754 | ||
755 | binder_free_buf_locked(alloc, buffer); | |
756 | buffers++; | |
757 | } | |
758 | ||
7a6d4b15 SY |
759 | while (!list_empty(&alloc->buffers)) { |
760 | buffer = list_first_entry(&alloc->buffers, | |
761 | struct binder_buffer, entry); | |
762 | WARN_ON(!buffer->free); | |
763 | ||
764 | list_del(&buffer->entry); | |
765 | WARN_ON_ONCE(!list_empty(&alloc->buffers)); | |
766 | kfree(buffer); | |
767 | } | |
768 | ||
2324f70c TK |
769 | page_count = 0; |
770 | if (alloc->pages) { | |
771 | int i; | |
772 | ||
773 | for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { | |
774 | void *page_addr; | |
f73e8e76 | 775 | bool on_lru; |
2324f70c | 776 | |
f73e8e76 | 777 | if (!alloc->pages[i].page_ptr) |
2324f70c TK |
778 | continue; |
779 | ||
f73e8e76 SY |
780 | on_lru = list_lru_del(&binder_alloc_lru, |
781 | &alloc->pages[i].lru); | |
2324f70c TK |
782 | page_addr = alloc->buffer + i * PAGE_SIZE; |
783 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, | |
f73e8e76 SY |
784 | "%s: %d: page %d at %pK %s\n", |
785 | __func__, alloc->pid, i, page_addr, | |
786 | on_lru ? "on lru" : "active"); | |
2324f70c | 787 | unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); |
f73e8e76 | 788 | __free_page(alloc->pages[i].page_ptr); |
2324f70c TK |
789 | page_count++; |
790 | } | |
791 | kfree(alloc->pages); | |
792 | vfree(alloc->buffer); | |
793 | } | |
794 | mutex_unlock(&alloc->mutex); | |
9b9d7cf1 SY |
795 | if (alloc->vma_vm_mm) |
796 | mmdrop(alloc->vma_vm_mm); | |
2324f70c TK |
797 | |
798 | binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE, | |
799 | "%s: %d buffers %d, pages %d\n", | |
800 | __func__, alloc->pid, buffers, page_count); | |
801 | } | |
802 | ||
803 | static void print_binder_buffer(struct seq_file *m, const char *prefix, | |
804 | struct binder_buffer *buffer) | |
805 | { | |
42e1ca78 | 806 | seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n", |
2324f70c TK |
807 | prefix, buffer->debug_id, buffer->data, |
808 | buffer->data_size, buffer->offsets_size, | |
42e1ca78 | 809 | buffer->extra_buffers_size, |
2324f70c TK |
810 | buffer->transaction ? "active" : "delivered"); |
811 | } | |
812 | ||
813 | /** | |
814 | * binder_alloc_print_allocated() - print buffer info | |
815 | * @m: seq_file for output via seq_printf() | |
816 | * @alloc: binder_alloc for this proc | |
817 | * | |
818 | * Prints information about every buffer associated with | |
819 | * the binder_alloc state to the given seq_file | |
820 | */ | |
821 | void binder_alloc_print_allocated(struct seq_file *m, | |
822 | struct binder_alloc *alloc) | |
823 | { | |
824 | struct rb_node *n; | |
825 | ||
826 | mutex_lock(&alloc->mutex); | |
827 | for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) | |
828 | print_binder_buffer(m, " buffer", | |
829 | rb_entry(n, struct binder_buffer, rb_node)); | |
830 | mutex_unlock(&alloc->mutex); | |
831 | } | |
832 | ||
798dfdd8 SY |
833 | /** |
834 | * binder_alloc_print_pages() - print page usage | |
835 | * @m: seq_file for output via seq_printf() | |
836 | * @alloc: binder_alloc for this proc | |
837 | */ | |
838 | void binder_alloc_print_pages(struct seq_file *m, | |
839 | struct binder_alloc *alloc) | |
840 | { | |
841 | struct binder_lru_page *page; | |
842 | int i; | |
843 | int active = 0; | |
844 | int lru = 0; | |
845 | int free = 0; | |
846 | ||
847 | mutex_lock(&alloc->mutex); | |
848 | for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { | |
849 | page = &alloc->pages[i]; | |
850 | if (!page->page_ptr) | |
851 | free++; | |
852 | else if (list_empty(&page->lru)) | |
853 | active++; | |
854 | else | |
855 | lru++; | |
856 | } | |
857 | mutex_unlock(&alloc->mutex); | |
858 | seq_printf(m, " pages: %d:%d:%d\n", active, lru, free); | |
c8bc3e3a | 859 | seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high); |
798dfdd8 SY |
860 | } |
861 | ||
2324f70c TK |
862 | /** |
863 | * binder_alloc_get_allocated_count() - return count of buffers | |
864 | * @alloc: binder_alloc for this proc | |
865 | * | |
866 | * Return: count of allocated buffers | |
867 | */ | |
868 | int binder_alloc_get_allocated_count(struct binder_alloc *alloc) | |
869 | { | |
870 | struct rb_node *n; | |
871 | int count = 0; | |
872 | ||
873 | mutex_lock(&alloc->mutex); | |
874 | for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) | |
875 | count++; | |
876 | mutex_unlock(&alloc->mutex); | |
877 | return count; | |
878 | } | |
879 | ||
880 | ||
881 | /** | |
882 | * binder_alloc_vma_close() - invalidate address space | |
883 | * @alloc: binder_alloc for this proc | |
884 | * | |
885 | * Called from binder_vma_close() when releasing address space. | |
886 | * Clears alloc->vma to prevent new incoming transactions from | |
887 | * allocating more buffers. | |
888 | */ | |
889 | void binder_alloc_vma_close(struct binder_alloc *alloc) | |
890 | { | |
891 | WRITE_ONCE(alloc->vma, NULL); | |
2324f70c TK |
892 | } |
893 | ||
f73e8e76 SY |
894 | /** |
895 | * binder_alloc_free_page() - shrinker callback to free pages | |
896 | * @item: item to free | |
897 | * @lock: lock protecting the item | |
898 | * @cb_arg: callback argument | |
899 | * | |
900 | * Called from list_lru_walk() in binder_shrink_scan() to free | |
901 | * up pages when the system is under memory pressure. | |
902 | */ | |
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock,
				       void *cb_arg)
{
	struct mm_struct *mm = NULL;
	struct binder_lru_page *page = container_of(item,
						    struct binder_lru_page,
						    lru);
	struct binder_alloc *alloc;
	uintptr_t page_addr;
	size_t index;
	struct vm_area_struct *vma;

	alloc = page->alloc;
	if (!mutex_trylock(&alloc->mutex))
		goto err_get_alloc_mutex_failed;

	if (!page->page_ptr)
		goto err_page_already_freed;

	index = page - alloc->pages;
	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;

	mm = alloc->vma_vm_mm;
	if (!atomic_inc_not_zero(&mm->mm_users))
		goto err_mmget;
	if (!down_write_trylock(&mm->mmap_sem))
		goto err_down_write_mmap_sem_failed;
	vma = alloc->vma;

	list_lru_isolate(lru, item);
	spin_unlock(lock);

	if (vma) {
		trace_binder_unmap_user_start(alloc, index);

		zap_page_range(vma,
			       page_addr +
			       alloc->user_buffer_offset,
			       PAGE_SIZE, NULL);

		trace_binder_unmap_user_end(alloc, index);
	}
	up_write(&mm->mmap_sem);
	mmput(mm);

	trace_binder_unmap_kernel_start(alloc, index);

	unmap_kernel_range(page_addr, PAGE_SIZE);
	__free_page(page->page_ptr);
	page->page_ptr = NULL;

	trace_binder_unmap_kernel_end(alloc, index);

	spin_lock(lock);
	mutex_unlock(&alloc->mutex);
	return LRU_REMOVED_RETRY;

err_down_write_mmap_sem_failed:
	mmput_async(mm);
err_mmget:
err_page_already_freed:
	mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
	return LRU_SKIP;
}
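
/*
 * Reclaim flow in short: binder_shrink_scan() below walks binder_alloc_lru
 * and invokes this callback for each parked page; the page is unmapped
 * from userspace (if the vma still exists), unmapped from the kernel
 * range, and freed. LRU_SKIP is returned whenever a lock cannot be taken
 * without blocking, since shrinker callbacks must not stall reclaim.
 */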
970 | ||
971 | static unsigned long | |
972 | binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc) | |
973 | { | |
974 | unsigned long ret = list_lru_count(&binder_alloc_lru); | |
975 | return ret; | |
976 | } | |
977 | ||
978 | static unsigned long | |
979 | binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) | |
980 | { | |
981 | unsigned long ret; | |
982 | ||
983 | ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page, | |
984 | NULL, sc->nr_to_scan); | |
985 | return ret; | |
986 | } | |
987 | ||
c9391ba6 | 988 | static struct shrinker binder_shrinker = { |
f73e8e76 SY |
989 | .count_objects = binder_shrink_count, |
990 | .scan_objects = binder_shrink_scan, | |
991 | .seeks = DEFAULT_SEEKS, | |
992 | }; | |
993 | ||
2324f70c TK |
994 | /** |
995 | * binder_alloc_init() - called by binder_open() for per-proc initialization | |
996 | * @alloc: binder_alloc for this proc | |
997 | * | |
998 | * Called from binder_open() to initialize binder_alloc fields for | |
999 | * new binder proc | |
1000 | */ | |
1001 | void binder_alloc_init(struct binder_alloc *alloc) | |
1002 | { | |
2324f70c TK |
1003 | alloc->pid = current->group_leader->pid; |
1004 | mutex_init(&alloc->mutex); | |
7a6d4b15 | 1005 | INIT_LIST_HEAD(&alloc->buffers); |
2324f70c TK |
1006 | } |
1007 | ||
5a068558 | 1008 | int binder_alloc_shrinker_init(void) |
f73e8e76 | 1009 | { |
5a068558 MB |
1010 | int ret = list_lru_init(&binder_alloc_lru); |
1011 | ||
1012 | if (ret == 0) { | |
1013 | ret = register_shrinker(&binder_shrinker); | |
1014 | if (ret) | |
1015 | list_lru_destroy(&binder_alloc_lru); | |
1016 | } | |
1017 | return ret; | |
f73e8e76 | 1018 | } |