/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>
#include <linux/sched/task.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

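/*
 * The helpers below track per-page dirty state for faulted buffers by
 * tagging bit 0 of the struct page pointers stored in buffer->pages[].
 * struct page pointers are always at least word aligned, so bit 0 is
 * free to use as a dirty flag: ion_buffer_page() masks the tag off to
 * recover the real pointer, while the dirty/clean helpers set or
 * clear it in place.
 */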
static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

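/*
 * Allocate backing memory from @heap and track the new buffer in
 * @dev->buffers. If the heap frees asynchronously
 * (ION_HEAP_FLAG_DEFER_FREE), a failed allocation is retried once
 * after draining the heap's freelist, since the memory may simply
 * not have been returned to the system yet.
 */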
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	if (buffer->sg_table == NULL) {
		WARN_ONCE(1, "heap->ops->allocate must set buffer->sg_table");
		ret = -EINVAL;
		goto err1;
	}

	table = buffer->sg_table;
	buffer->dev = dev;
	buffer->size = len;

	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err1;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * This will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here. However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg. The implicit contract here is that
	 * memory coming from the heaps is ready for dma, i.e. if it has a
	 * cached mapping that mapping has been invalidated.
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
	}
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * When a buffer is removed from a handle, if it is not in
	 * any other handles, copy the task comm and the pid of the
	 * process it's being removed from into the buffer. At this
	 * point there will be no way to track what processes this buffer is
	 * being used by; it only exists as a dma_buf file descriptor.
	 * The task comm and pid can provide a debug hint as to where this fd
	 * is in the system.
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

int ion_handle_put_nolock(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}

int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = ion_handle_put_nolock(handle);
	mutex_unlock(&client->lock);

	return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
					       int id)
{
	struct ion_handle *handle;

	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);

	return handle ? handle : ERR_PTR(-EINVAL);
}

struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
					int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = ion_handle_get_by_id_nolock(client, id);
	mutex_unlock(&client->lock);

	return handle;
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
}

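/*
 * A handle is indexed twice per client: by id in client->idr, so that
 * userspace can name it in ioctls, and by buffer pointer in the
 * client->handles rbtree, so that importing the same buffer twice
 * (see ion_handle_lookup()) finds the existing handle rather than
 * creating a duplicate.
 */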
static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

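/*
 * Typical in-kernel usage, as a minimal sketch -- the client name and
 * the heap mask/flags chosen here are illustrative only:
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *
 *	client = ion_client_create(idev, "my-driver");
 *	handle = ion_alloc(client, SZ_1M, PAGE_SIZE,
 *			   ION_HEAP_SYSTEM_MASK, ION_FLAG_CACHED);
 *	if (!IS_ERR(handle))
 *		ion_free(client, handle);
 *	ion_client_destroy(client);
 */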
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * Traverse the list of heaps available in this system in priority
	 * order. If the heap type is supported by the client and matches the
	 * caller's request, allocate from it. Repeat until an allocation has
	 * succeeded or all heaps have been tried.
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);

void ion_free_nolock(struct ion_client *client,
		     struct ion_handle *handle)
{
	if (!ion_handle_validate(client, handle)) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put_nolock(handle);
}

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	ion_free_nolock(client, handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

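/*
 * Kernel mappings are reference counted at two levels: each handle
 * counts its own map_kernel callers in handle->kmap_cnt, and the
 * buffer counts mapped handles in buffer->kmap_cnt. The heap's
 * map_kernel/unmap_kernel ops only run on the buffer's 0 <-> 1
 * transitions, so repeated ion_map_kernel() calls on an already
 * mapped buffer are cheap.
 */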
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	if (!handle->kmap_cnt) {
		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
		return;
	}
	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static struct mutex debugfs_mutex;
static struct rb_root *ion_root_client;
static int is_client_alive(struct ion_client *client)
{
	struct rb_node *node;
	struct ion_client *tmp;
	struct ion_device *dev;

	node = ion_root_client->rb_node;
	dev = container_of(ion_root_client, struct ion_device, clients);

	down_read(&dev->lock);
	while (node) {
		tmp = rb_entry(node, struct ion_client, node);
		if (client < tmp) {
			node = node->rb_left;
		} else if (client > tmp) {
			node = node->rb_right;
		} else {
			up_read(&dev->lock);
			return 1;
		}
	}

	up_read(&dev->lock);
	return 0;
}

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	mutex_lock(&debugfs_mutex);
	if (!is_client_alive(client)) {
		seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
			   client);
		mutex_unlock(&debugfs_mutex);
		return 0;
	}

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	mutex_unlock(&debugfs_mutex);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}

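/*
 * Create a client through which a driver (or, via ion_open(), a
 * userspace process) allocates buffers. The group leader's task
 * struct is pinned for non-kernel threads so debugfs can still
 * resolve the owning process name later; display_serial
 * disambiguates clients that register under the same name.
 */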
struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
		       path, client->display_name);
	}

	up_write(&dev->lock);

	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	mutex_lock(&debugfs_mutex);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client->display_name);
	kfree(client->name);
	kfree(client);
	mutex_unlock(&debugfs_mutex);
}
EXPORT_SYMBOL(ion_client_destroy);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
	 * for the targeted device, but this works on the currently targeted
	 * hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
						  PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start);
	}
	mutex_unlock(&buffer->lock);
}

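/*
 * For cached buffers that use faulted mappings, pages are mapped into
 * userspace lazily, one fault at a time, and marked dirty as they are
 * touched. ion_buffer_sync_for_device() then only flushes the dirty
 * pages and zaps the userspace mappings so the cycle can repeat.
 */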
static int ion_vm_fault(struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vmf->vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vmf->vma, vmf->address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

static const struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
				 VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	return PTR_ERR_OR_ZERO(vaddr);
}

static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);

	return 0;
}

static const struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);

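/*
 * Sharing round-trip, as a minimal sketch (the client/handle names
 * are illustrative): the exporting side turns a handle into an fd,
 * the importing side turns that fd back into a handle on its own
 * client, and both handles then reference the same ion_buffer.
 *
 *	int fd = ion_share_dma_buf_fd(client_a, handle_a);
 *	// ...pass fd to the other side, e.g. over binder or a socket...
 *	struct ion_handle *handle_b = ion_import_dma_buf_fd(client_b, fd);
 */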
struct ion_handle *ion_import_dma_buf(struct ion_client *client,
				      struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: cannot import dmabuf from another exporter\n",
		       __func__);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle)) {
		mutex_unlock(&client->lock);
		goto end;
	}

	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	handle = ion_import_dma_buf(client, dmabuf);
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf_fd);

int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: cannot sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}
int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
{
	struct ion_device *dev = client->dev;
	struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
	int ret = -EINVAL, cnt = 0, max_cnt;
	struct ion_heap *heap;
	struct ion_heap_data hdata;

	memset(&hdata, 0, sizeof(hdata));

	down_read(&dev->lock);
	if (!buffer) {
		query->cnt = dev->heap_cnt;
		ret = 0;
		goto out;
	}

	if (query->cnt <= 0)
		goto out;

	max_cnt = query->cnt;

	plist_for_each_entry(heap, &dev->heaps, node) {
		strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
		hdata.name[sizeof(hdata.name) - 1] = '\0';
		hdata.type = heap->type;
		hdata.heap_id = heap->id;

		if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
			ret = -EFAULT;
			goto out;
		}

		cnt++;
		if (cnt >= max_cnt)
			break;
	}

	query->cnt = cnt;
	ret = 0;
out:
	up_read(&dev->lock);
	return ret;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, debug_name);
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl = compat_ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_puts(s, "----------------------------------------------------\n");

	mutex_lock(&debugfs_mutex);
	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	mutex_unlock(&debugfs_mutex);

	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   kref_read(&buffer->ref));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
			   heap->free_list_size);
	seq_puts(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = GFP_HIGHUSER;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");

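/*
 * Each shrinker-aware heap also gets a "<heap>_shrink" debugfs file
 * (created in ion_device_add_heap() below). Reading it reports how
 * many objects the heap's shrinker could release; writing N asks it
 * to scan N objects, and writing 0 asks it to release everything it
 * counts, e.g. (paths are illustrative and depend on where debugfs
 * is mounted):
 *
 *	cat /sys/kernel/debug/ion/heaps/system_shrink
 *	echo 0 > /sys/kernel/debug/ion/heaps/system_shrink
 */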
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct dentry *debug_file;

	if (!heap->ops->allocate || !heap->ops->free)
		pr_err("%s: cannot add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
		ion_heap_init_shrinker(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debug_file = debugfs_create_file(heap->name, 0664,
					 dev->heaps_debug_root, heap,
					 &debug_heap_fops);

	if (!debug_file) {
		char buf[256], *path;

		path = dentry_path(dev->heaps_debug_root, buf, 256);
		pr_err("Failed to create heap debugfs at %s/%s\n",
		       path, heap->name);
	}

	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debug_file = debugfs_create_file(
			debug_name, 0644, dev->heaps_debug_root, heap,
			&debug_shrink_fops);
		if (!debug_file) {
			char buf[256], *path;

			path = dentry_path(dev->heaps_debug_root, buf, 256);
			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
			       path, debug_name);
		}
	}

	dev->heap_cnt++;
	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root) {
		pr_err("ion: failed to create debugfs root directory.\n");
		goto debugfs_done;
	}
	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
	if (!idev->heaps_debug_root) {
		pr_err("ion: failed to create debugfs heaps directory.\n");
		goto debugfs_done;
	}
	idev->clients_debug_root = debugfs_create_dir("clients",
						      idev->debug_root);
	if (!idev->clients_debug_root)
		pr_err("ion: failed to create debugfs clients directory.\n");

debugfs_done:

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	ion_root_client = &idev->clients;
	mutex_init(&debugfs_mutex);
	return idev;
}
EXPORT_SYMBOL(ion_device_create);

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	debugfs_remove_recursive(dev->debug_root);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}
EXPORT_SYMBOL(ion_device_destroy);