/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @clients:		an rb tree of all the clients in the system
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
	struct dentry *heaps_debug_root;
	struct dentry *clients_debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @display_name:	used for debugging (unique version of @name)
 * @display_serial:	used for debugging (to make display_name unique)
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	char *display_name;
	int display_serial;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node, kmap_cnt or mapping should be protected by the
 * lock in the client. Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};

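/*
 * Buffers allocated with ION_FLAG_CACHED but without
 * ION_FLAG_CACHED_NEEDS_SYNC have their user mappings faulted in one
 * page at a time, so only pages actually touched by the CPU need cache
 * maintenance before dma (see ion_vm_fault() and
 * ion_buffer_sync_for_device() below).
 */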
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

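/*
 * For buffers with faulting user mappings, ion tracks per-page dirty
 * state by borrowing bit 0 of each struct page pointer stored in
 * buffer->pages (the pointers are at least word aligned, so the low bit
 * is otherwise always zero).  The helpers below extract the real page
 * pointer and test, set, or clear the dirty tag.
 */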
static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL,
		      "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_CAST(table);
	}
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg. The implicit contract here is that
	 * memory coming from the heaps is ready for dma, ie if it has a
	 * cached mapping that mapping has been invalidated
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
	vfree(buffer->pages);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	if (buffer->pages)
		vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by; it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = kref_put(&handle->ref, ion_handle_destroy);
	mutex_unlock(&client->lock);

	return ret;
}

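/*
 * Each client indexes its handles two ways: client->idr maps the small
 * integer ids handed out to userspace back to handles (see
 * ion_handle_get_by_id()), while client->handles is an rb tree keyed by
 * buffer pointer so that importing an already-known buffer finds the
 * existing handle (see ion_handle_lookup() and ion_import_dma_buf()).
 */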
static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
					       int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);
	mutex_unlock(&client->lock);

	return handle ? handle : ERR_PTR(-EINVAL);
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order. If the heap type is supported by the client, and matches the
	 * request of the caller, allocate from it. Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
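
/*
 * Example (illustrative sketch only, not part of the driver): an
 * in-kernel user allocating one page.  The heap id mask is platform
 * specific; ION_HEAP_SYSTEM_MASK is assumed here as a typical value.
 *
 *	struct ion_handle *handle;
 *
 *	handle = ion_alloc(client, PAGE_SIZE, 0, ION_HEAP_SYSTEM_MASK,
 *			   ION_FLAG_CACHED);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ion_free(client, handle);
 */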
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	mutex_unlock(&client->lock);
	ion_handle_put(handle);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);
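
/*
 * Example (illustrative sketch): temporary CPU access through a kernel
 * mapping.  "size" stands in for the allocation length passed to
 * ion_alloc().
 *
 *	void *vaddr = ion_map_kernel(client, handle);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, size);
 *	ion_unmap_kernel(client, handle);
 */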
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
		       path, client->display_name);
	}

	up_write(&dev->lock);

	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);
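
/*
 * Example (illustrative sketch): a driver obtaining its own client from
 * an ion_device created elsewhere at init time (idev is hypothetical).
 *
 *	struct ion_client *client = ion_client_create(idev, "my-driver");
 *
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 */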
void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client->display_name);
	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is
	 * valid for the targeted device, but this works on the currently
	 * targeted hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
						  PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

static struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
							VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	return PTR_ERR_OR_ZERO(vaddr);
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

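/*
 * Note that dma_buf_ops doubles as ion's identity check:
 * ion_import_dma_buf() and ion_sync_for_device() compare a dmabuf's ops
 * pointer against &dma_buf_ops to reject buffers exported by anyone else.
 */
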
struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf))
		ion_buffer_put(buffer);

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}
	mutex_unlock(&client->lock);

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle))
		goto end;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);
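
/*
 * Example (illustrative sketch): the share/import round trip.  Because
 * ion_import_dma_buf() recognizes its own dma_buf_ops, a second client
 * importing the fd ends up with a handle to the same ion_buffer.
 *
 *	struct ion_handle *handle_b;
 *	int fd;
 *
 *	fd = ion_share_dma_buf_fd(client_a, handle_a);
 *	if (fd < 0)
 *		return fd;
 *	handle_b = ion_import_dma_buf(client_b, fd);
 *	if (IS_ERR(handle_b))
 *		return PTR_ERR(handle_b);
 */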
static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
	switch (cmd) {
	case ION_IOC_SYNC:
	case ION_IOC_FREE:
	case ION_IOC_CUSTOM:
		return _IOC_WRITE;
	default:
		return _IOC_DIR(cmd);
	}
}

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;
	struct ion_device *dev = client->dev;
	struct ion_handle *cleanup_handle = NULL;
	int ret = 0;
	unsigned int dir;

	union {
		struct ion_fd_data fd;
		struct ion_allocation_data allocation;
		struct ion_handle_data handle;
		struct ion_custom_data custom;
	} data;

	dir = ion_ioctl_dir(cmd);

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	if (dir & _IOC_WRITE)
		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_handle *handle;

		handle = ion_alloc(client, data.allocation.len,
				   data.allocation.align,
				   data.allocation.heap_id_mask,
				   data.allocation.flags);
		if (IS_ERR(handle))
			return PTR_ERR(handle);

		data.allocation.handle = handle->id;

		cleanup_handle = handle;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		ion_free(client, handle);
		ion_handle_put(handle);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		data.fd.fd = ion_share_dma_buf_fd(client, handle);
		ion_handle_put(handle);
		if (data.fd.fd < 0)
			ret = data.fd.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_handle *handle;

		handle = ion_import_dma_buf(client, data.fd.fd);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		else
			data.handle.handle = handle->id;
		break;
	}
	case ION_IOC_SYNC:
	{
		ret = ion_sync_for_device(client, data.fd.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		if (!dev->custom_ioctl)
			return -ENOTTY;
		ret = dev->custom_ioctl(client, data.custom.cmd,
					data.custom.arg);
		break;
	}
	default:
		return -ENOTTY;
	}

	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
			if (cleanup_handle)
				ion_free(client, cleanup_handle);
			return -EFAULT;
		}
	}
	return ret;
}

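/*
 * Example (illustrative sketch): the matching userspace sequence for
 * ION_IOC_ALLOC followed by ION_IOC_SHARE, using the uapi structs from
 * ion.h.  The heap id mask value is platform specific.
 *
 *	struct ion_allocation_data alloc_data = {
 *		.len = 4096,
 *		.heap_id_mask = ION_HEAP_SYSTEM_MASK,
 *		.flags = ION_FLAG_CACHED,
 *	};
 *	struct ion_fd_data fd_data;
 *
 *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc_data);
 *	fd_data.handle = alloc_data.handle;
 *	ioctl(ion_fd, ION_IOC_SHARE, &fd_data);
 *	(fd_data.fd is now a dma-buf fd that can be passed to other
 *	processes or devices)
 */
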
static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, debug_name);
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl = compat_ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_puts(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
			   heap->free_list_size);
	seq_puts(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

#ifdef DEBUG_HEAP_SHRINKER
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	if (!val)
		return 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	sc.nr_to_scan = objs;

	heap->shrinker.shrink(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
#endif

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct dentry *debug_file;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
		ion_heap_init_shrinker(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debug_file = debugfs_create_file(heap->name, 0664,
					 dev->heaps_debug_root, heap,
					 &debug_heap_fops);

	if (!debug_file) {
		char buf[256], *path;

		path = dentry_path(dev->heaps_debug_root, buf, 256);
		pr_err("Failed to create heap debugfs at %s/%s\n",
		       path, heap->name);
	}

#ifdef DEBUG_HEAP_SHRINKER
	if (heap->shrinker.shrink) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debug_file = debugfs_create_file(
			debug_name, 0644, dev->heaps_debug_root, heap,
			&debug_shrink_fops);
		if (!debug_file) {
			char buf[256], *path;

			path = dentry_path(dev->heaps_debug_root, buf, 256);
			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
			       path, debug_name);
		}
	}
#endif
	up_write(&dev->lock);
}

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root) {
		pr_err("ion: failed to create debugfs root directory.\n");
		goto debugfs_done;
	}
	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
	if (!idev->heaps_debug_root) {
		pr_err("ion: failed to create debugfs heaps directory.\n");
		goto debugfs_done;
	}
	idev->clients_debug_root = debugfs_create_dir("clients",
						      idev->debug_root);
	if (!idev->clients_debug_root)
		pr_err("ion: failed to create debugfs clients directory.\n");

debugfs_done:

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	return idev;
}
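
/*
 * Example (illustrative sketch): typical platform bring-up.  A platform
 * driver creates the device once and registers each heap it has built
 * (my_heap is hypothetical).
 *
 *	struct ion_device *idev = ion_device_create(NULL);
 *
 *	if (IS_ERR(idev))
 *		return PTR_ERR(idev);
 *	ion_device_add_heap(idev, my_heap);
 */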
void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	debugfs_remove_recursive(dev->debug_root);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;

			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
				       __func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
						   data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}