/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @custom_ioctl:	optional hook supplied at ion_device_create() time,
 *			invoked for ION_IOC_CUSTOM
 * @clients:		an rb tree of all the existing clients
 * @debug_root:		debugfs root directory for ion
 * @heaps_debug_root:	debugfs directory holding per-heap debug files
 * @clients_debug_root:	debugfs directory holding per-client debug files
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
	struct dentry *heaps_debug_root;
	struct dentry *clients_debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @display_name:	used for debugging (unique version of @name)
 * @display_serial:	used for debugging (to make display_name unique)
 * @task:		used for debugging
 * @pid:		pid of the process that created this client
 * @debug_root:		debugfs entry describing this client's allocations
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the tree of handles
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	char *display_name;
	int display_serial;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node and kmap_cnt should be protected by the
 * lock in the client. Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};

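/*
 * Reference counting overview (descriptive note):
 *
 * A struct ion_buffer is refcounted via buffer->ref; ion_handle_create()
 * takes one buffer reference per handle and ion_handle_destroy() drops it,
 * while an exported dma_buf holds its own buffer reference that is dropped
 * in ion_dma_buf_release(). A struct ion_handle is itself refcounted via
 * handle->ref, so a client importing the same buffer twice shares a single
 * handle whose refcount is simply bumped.
 */
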
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

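/*
 * For buffers whose user mappings are faulted in on demand (cached buffers
 * without ION_FLAG_CACHED_NEEDS_SYNC), buffer->pages[] tracks per-page dirty
 * state by stashing a flag in bit 0 of each struct page pointer. The helpers
 * below set, test, and clear that bit; ion_buffer_page() masks it off before
 * the pointer is dereferenced.
 */
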
static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}

/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL,
		      "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		ret = -EINVAL;
		goto err1;
	}

	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here. However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg. The implicit contract here is that
	 * memory coming from the heaps is ready for dma, ie if it has a
	 * cached mapping that mapping has been invalidated
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
	}
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
err1:
	heap->ops->free(buffer);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer. At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put_nolock(struct ion_handle *handle)
{
	int ret;

	ret = kref_put(&handle->ref, ion_handle_destroy);

	return ret;
}

int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = ion_handle_put_nolock(handle);
	mutex_unlock(&client->lock);

	return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
						       int id)
{
	struct ion_handle *handle;

	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);

	return handle ? handle : ERR_PTR(-EINVAL);
}

struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
					int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = ion_handle_get_by_id_nolock(client, id);
	mutex_unlock(&client->lock);

	return handle;
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order. If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it. Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
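
/*
 * Minimal in-kernel usage sketch (illustrative only, not part of this file's
 * logic). It assumes an ion_device "idev" created elsewhere and that a heap
 * id mask such as ION_HEAP_SYSTEM_MASK is available from ion.h:
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *	void *vaddr;
 *
 *	client = ion_client_create(idev, "my-driver");
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *
 *	handle = ion_alloc(client, SZ_64K, PAGE_SIZE,
 *			   ION_HEAP_SYSTEM_MASK, ION_FLAG_CACHED);
 *	if (IS_ERR(handle)) {
 *		ion_client_destroy(client);
 *		return PTR_ERR(handle);
 *	}
 *
 *	vaddr = ion_map_kernel(client, handle);    -- CPU access via vaddr --
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */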

static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put_nolock(handle);
}

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	ion_free_nolock(client, handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
		       __func__, buffer->heap->name, buffer->heap->type);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	if (!handle->kmap_cnt) {
		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
		return;
	}
	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
		       path, client->display_name);
	}

	up_write(&dev->lock);

	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client->display_name);
	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
	 * for the targeted device, but this works on the currently targeted
	 * hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
						  PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

static const struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
				 VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	return PTR_ERR_OR_ZERO(vaddr);
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);
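
/*
 * Note: the fd returned above is an ordinary dma-buf file descriptor. It can
 * be passed to other processes or drivers and attached with the standard
 * dma-buf API (dma_buf_get()/dma_buf_attach()), or handed back to ion, where
 * ion_import_dma_buf() below recognises buffers exported through
 * &dma_buf_ops and maps them to a handle instead of treating them as foreign.
 */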

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle)) {
		mutex_unlock(&client->lock);
		goto end;
	}

	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
	switch (cmd) {
	case ION_IOC_SYNC:
	case ION_IOC_FREE:
	case ION_IOC_CUSTOM:
		return _IOC_WRITE;
	default:
		return _IOC_DIR(cmd);
	}
}
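
/*
 * ION_IOC_SYNC, ION_IOC_FREE and ION_IOC_CUSTOM are declared with a read
 * direction in the uapi header (historically _IOWR) even though they only
 * pass data into the kernel, so they are forced to _IOC_WRITE above.
 * ion_ioctl() below uses the corrected direction to decide whether to
 * copy_from_user() the argument before handling the command and whether to
 * copy_to_user() the (possibly updated) argument back afterwards.
 */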

static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;
	struct ion_device *dev = client->dev;
	struct ion_handle *cleanup_handle = NULL;
	int ret = 0;
	unsigned int dir;

	union {
		struct ion_fd_data fd;
		struct ion_allocation_data allocation;
		struct ion_handle_data handle;
		struct ion_custom_data custom;
	} data;

	dir = ion_ioctl_dir(cmd);

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	if (dir & _IOC_WRITE)
		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_handle *handle;

		handle = ion_alloc(client, data.allocation.len,
				   data.allocation.align,
				   data.allocation.heap_id_mask,
				   data.allocation.flags);
		if (IS_ERR(handle))
			return PTR_ERR(handle);

		data.allocation.handle = handle->id;

		cleanup_handle = handle;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle *handle;

		mutex_lock(&client->lock);
		handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
		if (IS_ERR(handle)) {
			mutex_unlock(&client->lock);
			return PTR_ERR(handle);
		}
		ion_free_nolock(client, handle);
		ion_handle_put_nolock(handle);
		mutex_unlock(&client->lock);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		data.fd.fd = ion_share_dma_buf_fd(client, handle);
		ion_handle_put(handle);
		if (data.fd.fd < 0)
			ret = data.fd.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_handle *handle;

		handle = ion_import_dma_buf(client, data.fd.fd);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		else
			data.handle.handle = handle->id;
		break;
	}
	case ION_IOC_SYNC:
	{
		ret = ion_sync_for_device(client, data.fd.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		if (!dev->custom_ioctl)
			return -ENOTTY;
		ret = dev->custom_ioctl(client, data.custom.cmd,
					data.custom.arg);
		break;
	}
	default:
		return -ENOTTY;
	}

	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
			if (cleanup_handle)
				ion_free(client, cleanup_handle);
			return -EFAULT;
		}
	}
	return ret;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, debug_name);
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl = compat_ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_puts(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
			   heap->free_list_size);
	seq_puts(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = val;

	if (!val) {
		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
		sc.nr_to_scan = objs;
	}

	heap->shrinker.scan_objects(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
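
/*
 * The <heap>_shrink debugfs file created from ion_device_add_heap() below is
 * backed by these helpers: reading it reports how many objects the heap's
 * shrinker can currently reclaim, writing N scans up to N objects, and
 * writing 0 drains everything the shrinker counts. Assuming debugfs is
 * mounted at /sys/kernel/debug and a heap named "system", a typical
 * (illustrative) invocation is:
 *
 *	cat /sys/kernel/debug/ion/heaps/system_shrink
 *	echo 0 > /sys/kernel/debug/ion/heaps/system_shrink
 */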

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct dentry *debug_file;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
		ion_heap_init_shrinker(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debug_file = debugfs_create_file(heap->name, 0664,
					 dev->heaps_debug_root, heap,
					 &debug_heap_fops);

	if (!debug_file) {
		char buf[256], *path;

		path = dentry_path(dev->heaps_debug_root, buf, 256);
		pr_err("Failed to create heap debugfs at %s/%s\n",
		       path, heap->name);
	}

	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debug_file = debugfs_create_file(
			debug_name, 0644, dev->heaps_debug_root, heap,
			&debug_shrink_fops);
		if (!debug_file) {
			char buf[256], *path;

			path = dentry_path(dev->heaps_debug_root, buf, 256);
			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
			       path, debug_name);
		}
	}

	up_write(&dev->lock);
}
EXPORT_SYMBOL(ion_device_add_heap);
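
/*
 * Typical board-level setup (illustrative sketch only; the heap data and the
 * ion_heap_create() helper from ion_priv.h are assumptions that must match
 * the platform's heap configuration):
 *
 *	struct ion_platform_heap heap_data = {
 *		.type = ION_HEAP_TYPE_SYSTEM,
 *		.id   = ION_HEAP_TYPE_SYSTEM,
 *		.name = "system",
 *	};
 *	struct ion_device *idev;
 *	struct ion_heap *heap;
 *
 *	idev = ion_device_create(NULL);      (NULL: no custom ioctl hook)
 *	heap = ion_heap_create(&heap_data);
 *	ion_device_add_heap(idev, heap);
 *
 * Error handling (IS_ERR() checks and ion_device_destroy() on failure) is
 * omitted for brevity.
 */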

struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root) {
		pr_err("ion: failed to create debugfs root directory.\n");
		goto debugfs_done;
	}
	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
	if (!idev->heaps_debug_root) {
		pr_err("ion: failed to create debugfs heaps directory.\n");
		goto debugfs_done;
	}
	idev->clients_debug_root = debugfs_create_dir("clients",
						      idev->debug_root);
	if (!idev->clients_debug_root)
		pr_err("ion: failed to create debugfs clients directory.\n");

debugfs_done:

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	return idev;
}
EXPORT_SYMBOL(ion_device_create);

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	debugfs_remove_recursive(dev->debug_root);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}
EXPORT_SYMBOL(ion_device_destroy);

void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;

			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
				       __func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
						   data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}