/*
 *
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/atomic.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>
#include <linux/mtk_ion.h>

#include "ion_priv.h"
#include "compat_ion.h"
#include "ion_profile.h"

#define ION_DEBUG 0
#if ION_DEBUG
#include <linux/ion_drv.h>
#include "ion_debug.h"
#include "ion_debug_db.h"
#include <linux/kallsyms.h>
#include <linux/module.h>
#define ION_DEBUG_INFO KERN_DEBUG
#define ION_DEBUG_TRACE KERN_DEBUG
#define ION_DEBUG_ERROR KERN_ERR
#define ION_DEBUG_WARN KERN_WARNING
extern struct mutex buffer_lifecycle_mutex;
#endif

#define DEBUG_HEAP_SHRINKER

#if 0 /* moved to ion_priv.h so every buffer's info can be dumped in ion_mm_heap.c */
/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @user_clients:	list of all the clients created from userspace
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
	struct dentry *heaps_debug_root;
	struct dentry *clients_debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @display_name:	used for debugging (unique version of @name)
 * @display_serial:	used for debugging (to make display_name unique)
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the handles tree
 * and the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	char *display_name;
	int display_serial;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

struct ion_handle_debug {
	pid_t pid;
	pid_t tgid;
	unsigned int backtrace[BACKTRACE_SIZE];
	unsigned int backtrace_num;
};

/**
 * ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
#if ION_RUNTIME_DEBUGGER
	struct ion_handle_debug dbg;
#endif
};

#endif

#if ION_DEBUG
static void ion_debug_db_create_clentry(pid_t pid);
static void ion_debug_db_destroy_clentry(pid_t pid);
static void ion_debug_create_db(struct dentry *root);
#endif

static int ion_debug_kern_rec(struct ion_client *client,
			      struct ion_buffer *buffer,
			      struct ion_handle *handle,
			      unsigned int action,
			      unsigned int address_type,
			      unsigned int address,
			      unsigned length,
			      int fd);

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}

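/*
 * Illustrative sketch (not part of this driver, kept compiled out): the
 * helpers above tag a buffer page's dirty state in bit 0 of its struct
 * page pointer, which is safe because struct page pointers are always at
 * least word-aligned.  A typical sequence looks like this:
 */
#if 0
static void example_page_tagging(struct ion_buffer *buffer, int i)
{
	/* mark page i dirty: it needs a cache sync before the next DMA */
	ion_buffer_page_dirty(buffer->pages + i);

	/* the tag must be stripped before the pointer is dereferenced */
	if (ion_buffer_page_is_dirty(buffer->pages[i])) {
		struct page *page = ion_buffer_page(buffer->pages[i]);

		/* ... sync "page" for the device, then clear the tag ... */
		ion_buffer_page_clean(buffer->pages + i);
	}
}
#endif
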
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer) {
		IONMSG("%s kzalloc failed, buffer is null.\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL,
		      "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		IONMSG("%s table is err 0x%p.\n", __func__, table);
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			IONMSG("%s vmalloc failed, pages is null.\n", __func__);
			ret = -ENOMEM;
			goto err1;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}

		if (ret)
			goto err;
	}

	buffer->dev = dev;
	buffer->size = len;
	INIT_LIST_HEAD(&buffer->vmas);

	/* log the task pid for debugging (added by k.zhang) */
	{
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}

	mutex_init(&buffer->lock);
	/*
	 * This will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg.  The implicit contract here is that
	 * memory coming from the heaps is ready for dma, ie if it has a
	 * cached mapping that mapping has been invalidated.
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg->dma_length = sg->length;
#endif
	}
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
err1:
	if (buffer->pages)
		vfree(buffer->pages);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	if (buffer->pages)
		vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

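/*
 * Buffer lifetime, as implemented above: ion_buffer_create() starts the
 * kref at 1, every ion_handle and every exported dma_buf holds one
 * additional reference via ion_buffer_get(), and the final
 * ion_buffer_put() runs _ion_buffer_destroy(), which unlinks the buffer
 * from dev->buffers and either frees it or queues it on the heap's
 * deferred-free list.
 */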
static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle) {
		IONMSG("%s kzalloc failed handle is null.\n", __func__);
		return ERR_PTR(-ENOMEM);
	}
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *handle);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	handle->buffer = NULL;
	handle->client = NULL;

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

/* Must hold the client lock */
static struct ion_handle *ion_handle_get_check_overflow(struct ion_handle *handle)
{
	if (atomic_read(&handle->ref.refcount) + 1 == 0)
		return ERR_PTR(-EOVERFLOW);
	ion_handle_get(handle);
	return handle;
}

static int ion_handle_put_nolock(struct ion_handle *handle)
{
	int ret;

	ret = kref_put(&handle->ref, ion_handle_destroy);

	return ret;
}

int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = ion_handle_put_nolock(handle);
	mutex_unlock(&client->lock);

	return ret;
}

/* Must hold the client lock */
static void user_ion_handle_get(struct ion_handle *handle)
{
	if (handle->user_ref_count++ == 0)
		kref_get(&handle->ref);
}

/* Must hold the client lock */
static struct ion_handle *user_ion_handle_get_check_overflow(struct ion_handle *handle)
{
	if (handle->user_ref_count + 1 == 0)
		return ERR_PTR(-EOVERFLOW);
	user_ion_handle_get(handle);
	return handle;
}

/*
 * Passes a kref to the user ref count.
 * We know we're holding a kref to the object before and
 * after this call, so no need to reverify handle.
 */
static struct ion_handle *pass_to_user(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	struct ion_handle *ret;

	mutex_lock(&client->lock);
	ret = user_ion_handle_get_check_overflow(handle);
	ion_handle_put_nolock(handle);
	mutex_unlock(&client->lock);
	return ret;
}

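/*
 * Illustrative sketch (not part of this driver, kept compiled out): the
 * user_ref_count above lets the ioctl layer hand userspace its own
 * reference that is pinned by exactly one kref, no matter how many times
 * userspace increments it.  The pairing used by ION_IOC_IMPORT, roughly:
 */
#if 0
static int example_user_ref_flow(struct ion_client *client, int fd)
{
	/* the new handle holds a kernel-side kref */
	struct ion_handle *handle = __ion_import_dma_buf(client, fd, 0);

	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* convert that kref into a single userspace reference */
	handle = pass_to_user(handle);

	/* ... ION_IOC_FREE later drops it via user_ion_free_nolock() ... */
	return IS_ERR(handle) ? PTR_ERR(handle) : 0;
}
#endif
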
/* Must hold the client lock */
static int user_ion_handle_put_nolock(struct ion_handle *handle)
{
	int ret = 0;

	if (--handle->user_ref_count == 0)
		ret = ion_handle_put_nolock(handle);

	return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
						      int id)
{
	struct ion_handle *handle;

	handle = idr_find(&client->idr, id);
	if (handle)
		return ion_handle_get_check_overflow(handle);

	return ERR_PTR(-EINVAL);
}

struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
					int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = ion_handle_get_by_id_nolock(client, id);
	mutex_unlock(&client->lock);

	return handle;
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return (idr_find(&client->idr, handle->id) == handle);
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0) {
		IONMSG("%s idr_alloc failed id = %d.\n", __func__, id);
		return id;
	}

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

struct ion_handle *__ion_alloc(struct ion_client *client, size_t len,
			       size_t align, unsigned int heap_id_mask,
			       unsigned int flags, bool grab_handle)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried
	 */
	len = PAGE_ALIGN(len);

	if (!len) {
		IONMSG("%s len cannot be zero.\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	/* reject allocations above 1 GB (added by k.zhang for an sgtable_init kernel exception) */
	if (len > 1024 * 1024 * 1024) {
		IONMSG("%s error: size (%zu) is more than 1G!\n", __func__, len);
		return ERR_PTR(-EINVAL);
	}
	MMProfileLogEx(ION_MMP_Events[PROFILE_ALLOC], MMProfileFlagStart, len, 0);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL) {
		IONMSG("%s buffer is null.\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	if (IS_ERR(buffer)) {
		IONMSG("%s buffer is error 0x%p.\n", __func__, buffer);
		return ERR_PTR(PTR_ERR(buffer));
	}

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle)) {
		IONMSG("%s handle is error 0x%p.\n", __func__, handle);
		return handle;
	}

	mutex_lock(&client->lock);
	if (grab_handle)
		ion_handle_get(handle);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
		IONMSG("%s ion handle add failed %d.\n", __func__, ret);
	}

	MMProfileLogEx(ION_MMP_Events[PROFILE_ALLOC], MMProfileFlagEnd, buffer->size, 0);

	return handle;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	return __ion_alloc(client, len, align, heap_id_mask, flags, false);
}
EXPORT_SYMBOL(ion_alloc);

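/*
 * Illustrative sketch (not part of this driver, kept compiled out): a
 * minimal in-kernel user of the exported API above.  The device pointer
 * and the all-ones heap mask are placeholders; real callers pass the ion
 * device created at probe time and a mask of the heap ids they accept.
 */
#if 0
static int example_kernel_client(struct ion_device *idev)
{
	struct ion_client *client;
	struct ion_handle *handle;
	void *vaddr;

	client = ion_client_create(idev, "example");
	if (IS_ERR(client))
		return PTR_ERR(client);

	/* allocate one page from any heap (mask ~0u is a placeholder) */
	handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE, ~0u, 0);
	if (IS_ERR(handle)) {
		ion_client_destroy(client);
		return PTR_ERR(handle);
	}

	vaddr = ion_map_kernel(client, handle);
	if (!IS_ERR(vaddr)) {
		memset(vaddr, 0, PAGE_SIZE);
		ion_unmap_kernel(client, handle);
	}

	ion_free(client, handle);
	ion_client_destroy(client);
	return 0;
}
#endif
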
static void user_ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	if (handle->user_ref_count == 0) {
		WARN(1, "%s: User does not have access!\n", __func__);
		return;
	}
	user_ion_handle_put_nolock(handle);
}

static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put_nolock(handle);
}

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	BUG_ON(client != handle->client);

	/* record before the put: the free may destroy the handle */
	ion_debug_kern_rec(client, handle->buffer, NULL, ION_FUNCTION_FREE, 0, 0, 0, 0);

	mutex_lock(&client->lock);
	ion_free_nolock(client, handle);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	MMProfileLogEx(ION_MMP_Events[PROFILE_GET_PHYS], MMProfileFlagStart,
		       (unsigned long)client, (unsigned long)handle);

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		IONMSG("%s invalid handle passed to phys.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);

	MMProfileLogEx(ION_MMP_Events[PROFILE_GET_PHYS], MMProfileFlagEnd,
		       buffer->size, *addr);

	return ret;
}
EXPORT_SYMBOL(ion_phys);

static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr)) {
		IONMSG("%s map kernel failed, addr = 0x%p.\n", __func__, vaddr);
		return vaddr;
	}
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr)) {
		IONMSG("%s vaddr is error 0x%p.\n", __func__, vaddr);
		return vaddr;
	}
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		MMProfileLogEx(ION_MMP_Events[PROFILE_UNMAP_KERNEL],
			       MMProfileFlagStart, buffer->size, 0);
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		MMProfileLogEx(ION_MMP_Events[PROFILE_UNMAP_KERNEL],
			       MMProfileFlagEnd, buffer->size, 0);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);

	ion_debug_kern_rec(client, handle->buffer, handle, ION_FUNCTION_MMAP,
			   ADDRESS_KERNEL_VIRTUAL, (unsigned long)vaddr,
			   handle->buffer->size, 0);

	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	ion_debug_kern_rec(client, handle->buffer, handle, ION_FUNCTION_MUNMAP,
			   ADDRESS_KERNEL_VIRTUAL,
			   (unsigned long)handle->buffer->vaddr,
			   handle->buffer->size, 0);

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int ion_get_client_serial(const struct rb_root *root,
				 const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						     node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}

struct ion_client *__ion_client_create(struct ion_device *dev,
				       const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						 dev->clients_debug_root,
						 client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
		       path, client->display_name);
	}

	up_write(&dev->lock);

#if ION_DEBUG
	ion_debug_db_create_clentry(client->pid);
#endif
	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;

	client = __ion_client_create(dev, name);
	if (IS_ERR_OR_NULL(client)) {
		IONMSG("%s client is error or null 0x%p.\n", __func__, client);
		return client;
	}

	ion_debug_kern_rec(client, NULL, NULL, ION_FUNCTION_CREATE_CLIENT, 0, 0, 0, 0);

	return client;
}
EXPORT_SYMBOL(ion_client_create);

void __ion_client_destroy(struct ion_client *client, int from_kern)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);

		mutex_lock(&client->lock);
		IONMSG("warning: release handle @ client destroy: handle=%p, buf=%p, ref=%d, size=%zu, kmap=%d\n",
		       handle, handle->buffer,
		       atomic_read(&handle->buffer->ref.refcount),
		       handle->buffer->size, handle->buffer->kmap_cnt);
		ion_handle_destroy(&handle->ref);
		mutex_unlock(&client->lock);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client->display_name);
	kfree(client->name);
#if ION_DEBUG
	if (from_kern)
		ion_debug_kern_rec(client, NULL, NULL, ION_FUNCTION_DESTROY_CLIENT, 0, 0, 0, 0);
	ion_debug_db_destroy_clentry(client->pid);
#endif

	kfree(client);
}

void ion_client_destroy(struct ion_client *client)
{
	__ion_client_destroy(client, 1);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

void ion_pages_sync_for_device(struct device *dev, struct page *page,
			       size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
	 * for the targeted device, but this works on the currently targeted
	 * hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
						  PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret) {
		IONMSG("%s vm insert pfn failed, vma = 0x%p, addr = 0x%p, pfn = %lu.\n",
		       __func__, vma, vmf->virtual_address, pfn);
		return VM_FAULT_ERROR;
	}

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list) {
		IONMSG("%s kmalloc failed, vma_list is null.\n", __func__);
		return;
	}
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

static struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	MMProfileLogEx(ION_MMP_Events[PROFILE_MAP_USER], MMProfileFlagStart,
		       buffer->size, vma->vm_start);

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
				 VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		/* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);
	MMProfileLogEx(ION_MMP_Events[PROFILE_MAP_USER], MMProfileFlagEnd,
		       buffer->size, vma->vm_start);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
				  struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		IONMSG("%s dma buf export failed, dmabuf is error 0x%p.\n",
		       __func__, dmabuf);
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int __ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle, int from_kern)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf)) {
		IONMSG("%s dmabuf is err 0x%p.\n", __func__, dmabuf);
		return PTR_ERR(dmabuf);
	}

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0) {
		IONMSG("%s dma_buf_fd failed %d.\n", __func__, fd);
		dma_buf_put(dmabuf);
	}
#if ION_DEBUG
	if (from_kern)
		ion_debug_kern_rec(client, handle->buffer, handle, ION_FUNCTION_SHARE, 0, 0, 0, fd);
#endif

	return fd;
}

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	return __ion_share_dma_buf_fd(client, handle, 1);
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);

struct ion_handle *__ion_import_dma_buf(struct ion_client *client, int fd, int from_kern)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	MMProfileLogEx(ION_MMP_Events[PROFILE_IMPORT], MMProfileFlagStart, 1, 1);

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf)) {
		IONMSG("%s dma_buf_get fail fd=%d ret=0x%p\n", __func__, fd, dmabuf);
		return ERR_PTR(PTR_ERR(dmabuf));
	}
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		handle = ion_handle_get_check_overflow(handle);
		mutex_unlock(&client->lock);
		goto end;
	}

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle)) {
		mutex_unlock(&client->lock);
		IONMSG("%s handle is error 0x%p.\n", __func__, handle);
		goto end;
	}

	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
		IONMSG("ion_import: ion_handle_add fail %d\n", ret);
	}

end:
	dma_buf_put(dmabuf);

#if ION_DEBUG
	if (!IS_ERR_OR_NULL(handle) && from_kern)
		ion_debug_kern_rec(client, handle->buffer, handle, ION_FUNCTION_IMPORT, 0, 0, 0, 0);
#endif

	MMProfileLogEx(ION_MMP_Events[PROFILE_IMPORT], MMProfileFlagEnd, 1, 1);
	return handle;
}

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	return __ion_import_dma_buf(client, fd, 1);
}
EXPORT_SYMBOL(ion_import_dma_buf);

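/*
 * Illustrative sketch (not part of this driver, kept compiled out): the
 * share/import pair above is how two clients end up referencing the same
 * ion_buffer through a dma_buf fd.  Both client pointers here are
 * placeholders for clients created elsewhere.
 */
#if 0
static int example_share_import(struct ion_client *producer,
				struct ion_client *consumer,
				struct ion_handle *handle)
{
	struct ion_handle *imported;
	int fd;

	/* wrap the buffer in a dma_buf and install it as an fd */
	fd = ion_share_dma_buf_fd(producer, handle);
	if (fd < 0)
		return fd;

	/* the fd (e.g. received over binder) maps back to the same buffer */
	imported = ion_import_dma_buf(consumer, fd);

	return IS_ERR(imported) ? PTR_ERR(imported) : 0;
}
#endif
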
static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf)) {
		IONMSG("%s dma_buf_get failed, dmabuf is err %d, 0x%p.\n",
		       __func__, fd, dmabuf);
		return PTR_ERR(dmabuf);
	}

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
	switch (cmd) {
	case ION_IOC_SYNC:
	case ION_IOC_FREE:
	case ION_IOC_CUSTOM:
		return _IOC_WRITE;
	default:
		return _IOC_DIR(cmd);
	}
}

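/*
 * For example (assuming the _IOWR definitions in the ion uapi header):
 * ION_IOC_FREE only needs to copy a struct ion_handle_data *into* the
 * kernel, but its ioctl number is declared read/write, so _IOC_DIR()
 * alone would make ion_ioctl() copy the unmodified struct back out.
 * Forcing the direction to _IOC_WRITE skips that pointless copy_to_user.
 */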
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;
	struct ion_device *dev = client->dev;
	struct ion_handle *cleanup_handle = NULL;
	int ret = 0;
	unsigned int dir;

	union {
		struct ion_fd_data fd;
		struct ion_allocation_data allocation;
		struct ion_handle_data handle;
		struct ion_custom_data custom;
	} data;

	dir = ion_ioctl_dir(cmd);

	if (_IOC_SIZE(cmd) > sizeof(data)) {
		IONMSG("ion_ioctl cmd = %d, _IOC_SIZE(cmd) = %d, sizeof(data) = %zd.\n",
		       cmd, _IOC_SIZE(cmd), sizeof(data));
		return -EINVAL;
	}

	if (dir & _IOC_WRITE) {
		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) {
			IONMSG("ion_ioctl copy_from_user fail! cmd = %d, n = %d.\n",
			       cmd, _IOC_SIZE(cmd));
			return -EFAULT;
		}
	}

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_handle *handle;

		handle = __ion_alloc(client, data.allocation.len,
				     data.allocation.align,
				     data.allocation.heap_id_mask,
				     data.allocation.flags, true);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			IONMSG("ION_IOC_ALLOC handle is invalid. ret = %d.\n", ret);
			return ret;
		}
		pass_to_user(handle);
		data.allocation.handle = handle->id;

		cleanup_handle = handle;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle *handle;

		mutex_lock(&client->lock);
		handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			mutex_unlock(&client->lock);
			IONMSG("ION_IOC_FREE handle is invalid. handle = %d, ret = %d.\n",
			       data.handle.handle, ret);
			return ret;
		}
		user_ion_free_nolock(client, handle);
		ion_handle_put_nolock(handle);
		mutex_unlock(&client->lock);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			IONMSG("ION_IOC_SHARE handle is invalid. handle = %d, ret = %d.\n",
			       data.handle.handle, ret);
			return ret;
		}
		data.fd.fd = __ion_share_dma_buf_fd(client, handle, 0);
		ion_handle_put(handle);
		if (data.fd.fd < 0) {
			IONMSG("ION_IOC_SHARE fd = %d.\n", data.fd.fd);
			ret = data.fd.fd;
		}
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_handle *handle;

		handle = __ion_import_dma_buf(client, data.fd.fd, 0);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			IONMSG("ion_import fail: fd=%d, ret=%d\n", data.fd.fd, ret);
		} else {
			handle = pass_to_user(handle);
			if (IS_ERR(handle))
				ret = PTR_ERR(handle);
			else
				data.handle.handle = handle->id;
		}
		break;
	}
	case ION_IOC_SYNC:
	{
		ret = ion_sync_for_device(client, data.fd.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		if (!dev->custom_ioctl) {
			IONMSG("ION_IOC_CUSTOM dev has no custom ioctl!\n");
			return -ENOTTY;
		}
		ret = dev->custom_ioctl(client, data.custom.cmd,
					data.custom.arg);
		break;
	}
	default:
		IONMSG("ion_ioctl : No such command!! 0x%x\n", cmd);
		return -ENOTTY;
	}

	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
			if (cleanup_handle) {
				mutex_lock(&client->lock);
				user_ion_free_nolock(client, cleanup_handle);
				ion_handle_put_nolock(cleanup_handle);
				mutex_unlock(&client->lock);
			}
			IONMSG("ion_ioctl copy_to_user fail! cmd = %d, n = %d.\n",
			       cmd, _IOC_SIZE(cmd));
			return -EFAULT;
		}
	}
	if (cleanup_handle)
		ion_handle_put(cleanup_handle);
	return ret;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	__ion_client_destroy(client, 0);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = __ion_client_create(dev, debug_name);
	if (IS_ERR(client)) {
		IONMSG("%s ion client create failed 0x%p.\n", __func__, client);
		return PTR_ERR(client);
	}
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl = compat_ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);

		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s(%16s) %16s %16s %s\n",
		   "client", "dbg_name", "pid", "size", "address");
	seq_puts(s, "----------------------------------------------------\n");

	down_read(&dev->lock);
	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s(%16s) %16u %16zu 0x%p\n", task_comm,
				   client->dbg_name, client->pid, size, client);
		} else {
			seq_printf(s, "%16s(%16s) %16u %16zu 0x%p\n", client->name,
				   "from_kernel", client->pid, size, client);
		}
	}
	up_read(&dev->lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);

		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
			   heap->free_list_size);
	seq_puts(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

#ifdef DEBUG_HEAP_SHRINKER
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	if (!val) {
		IONMSG("%s val cannot be zero.\n", __func__);
		return 0;
	}

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	sc.nr_to_scan = objs;

	heap->shrinker.shrink(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
#endif

void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct dentry *debug_file;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
		ion_heap_init_shrinker(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debug_file = debugfs_create_file(heap->name, 0664,
					 dev->heaps_debug_root, heap,
					 &debug_heap_fops);

	if (!debug_file) {
		char buf[256], *path;

		path = dentry_path(dev->heaps_debug_root, buf, 256);
		pr_err("Failed to create heap debugfs at %s/%s\n",
		       path, heap->name);
	}

#ifdef DEBUG_HEAP_SHRINKER
	if (heap->shrinker.shrink) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debug_file = debugfs_create_file(
			debug_name, 0644, dev->heaps_debug_root, heap,
			&debug_shrink_fops);
		if (!debug_file) {
			char buf[256], *path;

			path = dentry_path(dev->heaps_debug_root, buf, 256);
			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
			       path, debug_name);
		}
	}
#endif
	up_write(&dev->lock);
}

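/*
 * The negated id above exploits the fact that plists sort ascending: a
 * heap with id 5 gets node priority -5 and therefore sits ahead of a
 * heap with id 0 (priority 0), so __ion_alloc()'s plist_for_each_entry()
 * walk tries higher-numbered heaps first.
 */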
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev) {
		IONMSG("%s kzalloc failed, idev is null.\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root) {
		pr_err("ion: failed to create debugfs root directory.\n");
		goto debugfs_done;
	}
	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
	if (!idev->heaps_debug_root) {
		pr_err("ion: failed to create debugfs heaps directory.\n");
		goto debugfs_done;
	}
	idev->clients_debug_root = debugfs_create_dir("clients",
						      idev->debug_root);
	if (!idev->clients_debug_root)
		pr_err("ion: failed to create debugfs clients directory.\n");

debugfs_done:

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
#if ION_DEBUG
	/* Create ION Debug DB Root */
	ion_debug_create_db(idev->debug_root);
#endif
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	debugfs_remove_recursive(dev->debug_root);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}
1935
1936void __init ion_reserve(struct ion_platform_data *data)
1937{
1938 int i;
1939
1940 for (i = 0; i < data->nr; i++) {
1941 if (data->heaps[i].size == 0)
1942 continue;
1943
1944 if (data->heaps[i].base == 0) {
1945 phys_addr_t paddr;
1946 paddr = memblock_alloc_base(data->heaps[i].size,
1947 data->heaps[i].align,
1948 MEMBLOCK_ALLOC_ANYWHERE);
1949 if (!paddr) {
1950 pr_err("%s: error allocating memblock for "
1951 "heap %d\n",
1952 __func__, i);
1953 continue;
1954 }
1955 data->heaps[i].base = paddr;
1956 } else {
1957 int ret = memblock_reserve(data->heaps[i].base,
1958 data->heaps[i].size);
1959 if (ret)
1960 pr_err("memblock reserve of %zx@%lx failed\n",
1961 data->heaps[i].size,
1962 data->heaps[i].base);
1963 }
1964 pr_info("%s: %s reserved base %lx size %zu\n", __func__,
1965 data->heaps[i].name,
1966 data->heaps[i].base,
1967 data->heaps[i].size);
1968 }
1969}
1970
1971//============================================================================
1972// helper functions
1973//============================================================================
1974
1975 struct ion_handle *ion_drv_get_handle(struct ion_client *client, int user_handle,
struct ion_handle *kernel_handle, int from_kernel)
1976 {
1977 struct ion_handle *handle;
1978 
1979 if (from_kernel) {
1980 handle = kernel_handle;
1981 
1982 if (IS_ERR_OR_NULL(handle)) {
1983 IONMSG("%s handle invalid, handle = 0x%p.\n", __func__, handle);
1984 return ERR_PTR(-EINVAL);
1985 }
1986 
1987 mutex_lock(&client->lock);
1988 if (!ion_handle_validate(client, handle)) {
1989 IONMSG("%s handle invalid, handle=0x%p\n", __func__, handle);
1990 mutex_unlock(&client->lock);
1991 return ERR_PTR(-EINVAL);
1992 }
1993 ion_handle_get(handle);
1994 mutex_unlock(&client->lock);
1995 } else {
1996 handle = ion_handle_get_by_id(client, user_handle);
1997 if (!handle) {
1998 IONMSG("%s handle invalid, handle_id=%d\n", __func__, user_handle);
1999 return ERR_PTR(-EINVAL);
2000 }
2001 }
2002 return handle;
2003 }
2004
2005int ion_drv_put_kernel_handle(void *kernel_handle)
2006{
2007 return ion_handle_put(kernel_handle);
2008}
2009
2010//=============================================================================================
2011
2012#if ION_DEBUG
2013static int ion_debug_kern_rec(struct ion_client *client,
2014 struct ion_buffer *buffer,
2015 struct ion_handle * handle,
2016 unsigned int action,
2017 unsigned int address_type,
2018 unsigned int address,
2019 unsigned length,
2020 int fd)
2021{
2022 ion_sys_record_t record_param;
2023 record_param.client = client;
2024 record_param.pid = client->pid;
2025 if (current->pid != current->tgid) {
2027 record_param.group_id = current->tgid;
2028 printk(ION_DEBUG_INFO "[KERNEL tgid is %d]\n", current->tgid);
2030 } else {
2032 record_param.group_id = current->pid;
2033 }
2034
2035 record_param.buffer = buffer;
2036 record_param.handle = handle;
2037 record_param.action = action;
2038 record_param.address_type = address_type;
2039 record_param.address = (unsigned int)address;
2040 record_param.length = length;
2041 record_param.fd = fd;
2042 record_param.backtrace_num = get_kernel_backtrace((unsigned long *)record_param.backtrace);
2043 get_kernel_symbol((unsigned long *)record_param.backtrace, record_param.backtrace_num, &record_param.kernel_symbol[0]);
2044 record_ion_info(1, &record_param);
2045 return 0;
2046}
2047#else
2048static int ion_debug_kern_rec(struct ion_client *client,
2049 struct ion_buffer *buffer,
2050 struct ion_handle * handle,
2051 unsigned int action,
2052 unsigned int address_type,
2053 unsigned int address,
2054 unsigned length,
2055 int fd)
2056{
2057 return 0;
2058}
2059#endif
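/*
 * The empty stub above keeps every ion_debug_kern_rec() call site valid
 * when ION_DEBUG is 0, so callers need no #if guards of their own; the
 * compiler simply optimizes the no-op call away.
 */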
2060
2061#if ION_DEBUG
2062/*
2063 * ION Debug assistant function
2064 */
2065static void *ion_get_list_from_buffer(struct ion_buffer *buf, unsigned int type)
2066{
2067 struct ion_buffer_record *buf_rec = NULL;
2068
2069 /* Get the in-use buffer record list */
2070 buf_rec = ion_get_inuse_buffer_record();
2071 if (!buf_rec) {
2072 printk(KERN_WARNING "No in-use buffers!\n");
2073 return NULL;
2074 }
2075 
2076 /* Walk the list for the record matching this buffer */
2077 do {
2079 if (buf_rec->buffer_address == buf)
2080 return ion_get_list(LIST_BUFFER, buf_rec, type);
2082 /* Next record */
2083 buf_rec = buf_rec->next;
2084 } while (buf_rec);
2085
2086 return NULL;
2087}
2088
2089/*
2090 * ION Debug assistant function
2091 */
2092static void *ion_get_list_from_process(pid_t pid, unsigned int type)
2093{
2094 struct ion_process_record *process_rec = NULL;
2095
2096 /* Get the in-use process record list */
2097 process_rec = (struct ion_process_record *)ion_get_inuse_process_usage_record2();
2098 if (!process_rec) {
2099 printk(KERN_WARNING "No in-use processes!\n");
2100 return NULL;
2101 }
2102 
2103 /* Walk the list for the record matching this pid */
2104 do {
2106 if (process_rec->pid == pid)
2107 return ion_get_list(LIST_PROCESS, process_rec, type);
2109 /* Next record */
2110 process_rec = process_rec->next;
2111 } while (process_rec);
2112
2113 return NULL;
2114}
2115
2116/*
2117 * ION Debug assistant function
2118 */
2119static void *ion_get_client_record(struct ion_client *client)
2120{
2121 struct ion_client_usage_record *client_rec = NULL;
2122
2123 /* Get the in-use client record list */
2124 client_rec = ion_get_inuse_client_record();
2125 if (!client_rec) {
2126 printk(KERN_WARNING "No in-use clients!\n");
2127 return NULL;
2128 }
2129 
2130 /* Walk the list for the record matching this client */
2131 do {
2133 if (client_rec->tracking_info.recordID.client_address == (unsigned int)client &&
((client_rec->tracking_info.from_kernel &&
client_rec->tracking_info.recordID.group_pid == client->pid) ||
(!client_rec->tracking_info.from_kernel &&
client_rec->tracking_info.recordID.pid == client->pid)))
2135 return (void *)client_rec;
2141 /* Next record */
2142 client_rec = (struct ion_client_usage_record *)client_rec->next;
2143 } while (client_rec);
2144
2145 return NULL;
2146}
2147
2148/*
2149 * ION Debug DB assistant function of showing backtrace
2150 */
2151static int ion_debugdb_show_backtrace(struct seq_file *s, struct ion_record_basic_info *ti, unsigned int sbt)
2152{
2153 unsigned int i = 0;
2154 unsigned int backtrace_count = 0;
2155 ObjectEntry *tmp = NULL;
2156 unsigned int stringCount = KSYM_SYMBOL_LEN + 30;
2157
2158 if (ti == NULL) {
2159 return 0;
2160 }
2161
2162 if (sbt == ALLOCATE_BACKTRACE_INFO) {
2163 tmp = (ObjectEntry *)ti->allocate_backtrace;
2164 if (tmp == NULL)
2165 return 0;
2166 backtrace_count = tmp->numEntries;
2167 } else if (sbt == RELEASE_BACKTRACE_INFO) {
2168 tmp = (ObjectEntry *)ti->release_backtrace;
2169 if (tmp == NULL)
2170 return 0;
2171 backtrace_count = tmp->numEntries;
2172 }
2173
2175 if (backtrace_count != 0) {
2176 seq_printf(s, "%19s\n", "[BACKTRACE]");
2177 }
2178
2179 for (i = 0; i < backtrace_count; i++) {
2180 char tmpString[stringCount];
2181 ion_get_backtrace_info(ti, tmpString, stringCount, i, sbt);
2182 seq_printf(s, "%10s %s", "::", tmpString);
2183 }
2184 return 1;
2185}
2186
2187/*
2188 * ION Debug DB file operations
2189 */
2190
2191extern struct ion_device *g_ion_device;
2192static int ion_debug_dbcl_show(struct seq_file *s, void *unused)
2193{
2194 unsigned long key =(unsigned long) s->private;
2195 pid_t raw_key;
2196 enum dbcl_types type;
2197
2198 struct ion_device *dev = g_ion_device;
2199 struct rb_node *cn, *hn;
2200 struct ion_client *client;
2201 struct ion_handle *handle;
2202 struct ion_buffer *buffer;
2203 int client_cnt = 0, buffer_cnt = 0;
2204
2205
2206 /*
2207 * How we convert a key to a raw_key (and vice versa).
2208 *
2209 * Observations:
2210 * 1. Process IDs are bounded by pid_max, which is rarely larger than
2211 * PID_MAX_DEFAULT (0x8000).
2212 * (Unused) 2. Kernel modules usually sit above 0xbf000000 and are page-aligned.
2213 * (Unused) 3. Other kernel parts usually sit above 0xc0000000.
2214 *
2215 * Based on the above, the conversion rules are:
2216 * 1. For processes, key = (dbcl_types << 16) | raw_key, where raw_key is
2217 * the process ID.
2218 * (Unused) 2. For kernel modules, key = (raw_key | dbcl_types), where
2219 * raw_key is the virtual address the module resides at.
2220 * (Unused) 3. For other kernel parts, key = dbcl_types.
2221 */
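/*
 * Worked example (illustrative; assumes a dbcl_types value of 1): for
 * type 1 and pid 1234, ion_debug_db_create_clentry() encodes
 * key = (1 << 16) | 1234 = 0x104d2; the decode below recovers
 * type = key >> 16 = 1 and raw_key = key & 0xffff = 1234.
 */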
2222
2223#if 0
2224 if (unlikely(key >= 0xbf000000)) {
2225 /* Rarely-used case */
2226 } else if (likely(key >= 0x8000)) {
2227 type = key >> 16;
2228 raw_key = key & 0xffff;
2229 } else {
2230 /* Rarely-used case */
2231 }
2232#endif
2233
2234 /* Which type */
2235 type = key >> 16;
2236
2237 /* Which process */
2238 raw_key = key & 0xffff;
2239 seq_printf(s, "Process [%d]\n", raw_key);
2240
2241 /* Which type */
2242 switch (type) {
2243 case DBCL_CLIENT:
2244 /* Lv1 - all clients
2245 * Lv2 - all client-handles
2246 * Lv3 - all client-handle-buffers
2247 */
2248 printk(KERN_INFO "DBCL_CLIENT\n");
2249 {
2250 /* Lv1 - all clients */
2251 for (cn = rb_first(&dev->clients); cn; cn = rb_next(cn)) {
2252 client = rb_entry(cn, struct ion_client, node);
2253 /* Matched clients */
2254 if (client->pid == raw_key) {
2255 seq_printf(s, "%-8s[%2d] %12p\n", "client", client_cnt++, client);
2256 mutex_lock(&client->lock);
2257 /* Lv2 - all client-handles */
2258 for (hn = rb_first(&client->handles); hn; hn = rb_next(hn)) {
2259 handle = rb_entry(hn, struct ion_handle, node);
2260 seq_printf(s, "%10s[%2d] kmap_cnt(%d)\n", "handle", buffer_cnt, handle->kmap_cnt);
2261 /* Lv3 - all client-handle-buffers */
2262 buffer = handle->buffer;
2263 mutex_lock(&buffer->lock);
2264 seq_printf(s, "%10s[%2d] heap(%s) flags(%d) size(%d) kmap_cnt(%d) kvaddr(0x%x)\n",
2265 "buffer", buffer_cnt++, buffer->heap->name, (unsigned int)buffer->flags,
2266 buffer->size, (unsigned int)buffer->kmap_cnt, (unsigned int)buffer->vaddr);
2267 mutex_unlock(&buffer->lock);
2268 }
2269 mutex_unlock(&client->lock);
2270 }
2271 }
2272 }
2273 break;
2274 case DBCL_BUFFER:
2275 /* Lv1 - all buffers
2276 * Lv2 - all buffer-usage
2277 * */
2278 printk(KERN_INFO "DBCL_BUFFER\n");
2279 {
2280 struct ion_buffer_usage_record *usg_rec;
2281 struct ion_buffer_record *buf_rec = NULL;
2282 int buffer_count = 0;
2283 buf_rec = ion_get_inuse_buffer_record();
2284
2285 /* Find matched clients */
2286 for (cn = rb_first(&dev->clients); cn; cn = rb_next(cn)) {
2287 client = rb_entry(cn, struct ion_client, node);
2288 /* Matched clients */
2289 if (client->pid == raw_key) {
2290 mutex_lock(&client->lock);
2291 /* Lv1 - all buffers */
2292 for (hn = rb_first(&client->handles); hn; hn = rb_next(hn)) {
2293 handle = rb_entry(hn, struct ion_handle, node);
2294 buffer = handle->buffer;
2295 mutex_lock(&buffer->lock);
2296 seq_printf(s, "%s[%2d] size(%d) %12p\n", "buffer", buffer_cnt++, buffer->size, buffer);
2297 mutex_unlock(&buffer->lock);
2298 /* Lv2 - all buffer-usage */
2299
2300 usg_rec = ion_get_list_from_buffer(buffer, BUFFER_ALLOCATION_LIST);
2301 if(usg_rec != NULL)
2302 seq_printf(s, "%s\n"," <BUFFER_ALLOCATION_LIST>");
2303 while (!!usg_rec) {
2304 seq_printf(s, "%s [0x%x] %10s [%d] (%s [%d]) \n"," client",
2305 usg_rec->tracking_info.recordID.client_address,
2306 "Process",
2307 usg_rec->tracking_info.recordID.pid,
2308 "GroupLeader",
2309 usg_rec->tracking_info.recordID.group_pid);
2310 /* Show buffer allocation backtrace */
2311 ion_debugdb_show_backtrace(s, &usg_rec->tracking_info, ALLOCATE_BACKTRACE_INFO);
2312 /* Next buffer usage record */
2313 usg_rec = (struct ion_buffer_usage_record *)ion_get_data_from_record((void *)usg_rec, RECORD_NEXT);
2314 }
2315#if 0
2316 usg_rec = ion_get_list_from_buffer(buffer, BUFFER_FREE_LIST);
2317 if(usg_rec != NULL)
2318 seq_printf(s, "%s\n"," <BUFFER_FREE_LIST>");
2319 while (!!usg_rec) {
2320 seq_printf(s, "%s [0x%x] %10s [%d] (%s [%d])\n", " client",
2321 usg_rec->tracking_info.recordID.client_address,
2322 "Process",
2323 usg_rec->tracking_info.recordID.pid,
2324 "GroupLeader",
2325 usg_rec->tracking_info.recordID.group_pid);
2326 /* Show buffer free backtrace */
2327 ion_debugdb_show_backtrace(s, &usg_rec->tracking_info, RELEASE_BACKTRACE_INFO);
2328 /* Next buffer usage record */
2329 usg_rec = (struct ion_buffer_usage_record *)ion_get_data_from_record((void *)usg_rec, RECORD_NEXT);
2330 }
2331 seq_printf(s, "%10s\n","==================================================");
2332#endif
2333 }
2334 mutex_unlock(&client->lock);
2335 }
2336 }
2337 while (buf_rec != NULL) {
2338 /* Allocation */
2339 usg_rec = ion_get_list(LIST_BUFFER,buf_rec, BUFFER_ALLOCATION_LIST);
2340
2341 while (usg_rec && usg_rec->tracking_info.recordID.pid == raw_key) {
2342 buffer_count++;
2343 if (buffer_count == 1)
2345 seq_printf(s, "%8s[%2d] buffer: 0x%p buffer structure addr: 0x%p size(%zu)\n", "buffer", buffer_cnt++, buf_rec->buffer, buf_rec->buffer_address, buf_rec->buffer->size);
2347 seq_printf(s, "%s\n"," <BUFFER_ALLOCATION_LIST>");
2348 seq_printf(s, "%s [0x%x] %10s [%d] (%s [%d])\n"," client",
2349 usg_rec->tracking_info.recordID.client_address,
2350 "Process",
2351 usg_rec->tracking_info.recordID.pid,
2352 "GroupLeader",
2353 usg_rec->tracking_info.recordID.group_pid);
2354
2355 /* Show buffer allocation backtrace */
2356 ion_debugdb_show_backtrace(s, &usg_rec->tracking_info, ALLOCATE_BACKTRACE_INFO);
2357 /* Next buffer usage record */
2358 usg_rec = (struct ion_buffer_usage_record *)ion_get_data_from_record((void *)usg_rec, RECORD_NEXT);
2359 }
2360 buffer_count = 0;
2361#if 0
2362 /* Free */
2363 usg_rec = ion_get_list(LIST_BUFFER,buf_rec, BUFFER_FREE_LIST);
2364 while ((!!usg_rec)&&(usg_rec->tracking_info.recordID.pid== raw_key)) {
2365 seq_printf(s, "%s\n"," <BUFFER_FREE_LIST>");
2366 seq_printf(s, "%s [0x%x] %10s [%d] (%s [%d])\n"," client",
2367 usg_rec->tracking_info.recordID.client_address,
2368 "Process",
2369 usg_rec->tracking_info.recordID.pid,
2370 "GroupLeader",
2371 usg_rec->tracking_info.recordID.group_pid);
2372 /* Show buffer free backtrace */
2373 ion_debugdb_show_backtrace(s, &usg_rec->tracking_info, RELEASE_BACKTRACE_INFO);
2374 /* Next buffer usage record */
2375 usg_rec = (struct ion_buffer_usage_record *)ion_get_data_from_record((void *)usg_rec, RECORD_NEXT);
2376 }
2377#endif
2378 /* Next record */
2379 buf_rec = buf_rec->next;
2380 }
2381 }
2382 break;
2383 case DBCL_MMAP:
2384 /* Lv1 - all buffers
2385 * Lv2 - all buffer-mmaps
2386 */
2387 printk(KERN_INFO "DBCL_MMAP\n");
2388 {
2389 struct ion_address_usage_record *adr_rec;
2390 struct ion_client_usage_record *client_rec;
2391 /* Find matched clients */
2392 for (cn = rb_first(&dev->clients); cn; cn = rb_next(cn)) {
2393 client = rb_entry(cn, struct ion_client, node);
2394 /* Matched clients */
2395 if (client->pid == raw_key) {
2396 mutex_lock(&client->lock);
2397 /* Lv1 - all buffers */
2398 for (hn = rb_first(&client->handles); hn; hn = rb_next(hn)) {
2399 handle = rb_entry(hn, struct ion_handle, node);
2400 buffer = handle->buffer;
2401 mutex_lock(&buffer->lock);
2402 seq_printf(s, "%-8s[%2d] size(%d) %12p\n",
2403 "buffer", buffer_cnt++, buffer->size, buffer);
2404 mutex_unlock(&buffer->lock);
2405 /* Lv2 - all buffer-mmaps */
2406 adr_rec = ion_get_list_from_buffer(buffer, ADDRESS_ALLOCATION_LIST);
2407 if(adr_rec != NULL)
2408 {
2409 seq_printf(s, "%10s\n","<ADDRESS_ALLOCATION_LIST_IN_KERNELSPACE>");
2410 }
2411 while (!!adr_rec) {
2412 seq_printf(s, "%10s [%d] - %10s [0x%x]-[0x%x]%10s [%d]\n",
2413 "Process", adr_rec->tracking_info.recordID.pid,
2414 "Address", adr_rec->mapping_address,(adr_rec->mapping_address+adr_rec->size),
2415 "Size", adr_rec->size);
2416 /* Show address allocation backtrace */
2417 ion_debugdb_show_backtrace(s, &adr_rec->tracking_info, ALLOCATE_BACKTRACE_INFO);
2418 /* Next address record */
2419 adr_rec = (struct ion_address_usage_record *)ion_get_data_from_record((void *)adr_rec, RECORD_NEXT);
2420 }
2421
2422 adr_rec = ion_get_list_from_buffer(buffer, ADDRESS_FREE_LIST);
2423 if(adr_rec != NULL)
2424 {
2425 seq_printf(s, "%10s\n","<ADDRESS_FREE_LIST_IN_KERNELSPACE>");
2426 }
2427 while (!!adr_rec) {
2428 seq_printf(s, "%10s [%d] - %10s [0x%x]-[0x%x] %10s [%d]\n",
2429 "Process", adr_rec->tracking_info.recordID.pid,
2430 "Address", adr_rec->mapping_address,(adr_rec->mapping_address+adr_rec->size),
2431 "Size", adr_rec->size);
2432 /* Show address release backtrace */
2433 ion_debugdb_show_backtrace(s, &adr_rec->tracking_info, RELEASE_BACKTRACE_INFO);
2434 /* Next address record */
2435 adr_rec = (struct ion_address_usage_record *)ion_get_data_from_record((void *)adr_rec, RECORD_NEXT);
2436 }
2437
2438 }
2439 client_rec = (struct ion_client_usage_record *)ion_get_client_record(client);
2440 if(client_rec != NULL)
2441 {
2442 adr_rec = ion_get_list_from_process(client_rec->tracking_info.recordID.pid, ADDRESS_ALLOCATION_LIST);
2443 if(adr_rec != NULL)
2444 seq_printf(s, "%10s\n","<ADDRESS_ALLOCATION_LIST_IN_USERSPACE>");
2445 while (!!adr_rec)
2446 {
2447 seq_printf(s, "%10s [%d] - %10s [0x%x]-[0x%x] %10s [%d]\n",
2448 "Process", adr_rec->tracking_info.recordID.pid,
2449 "Address", adr_rec->mapping_address,(adr_rec->mapping_address+adr_rec->size),
2450 "Size", adr_rec->size);
2451 /* Show address allocation backtrace */
2452 ion_debugdb_show_backtrace(s, &adr_rec->tracking_info, ALLOCATE_BACKTRACE_INFO);
2453 /* Next address record */
2454 adr_rec = (struct ion_address_usage_record *)ion_get_data_from_record((void *)adr_rec, RECORD_NEXT);
2455 }
2456 adr_rec = ion_get_list_from_process(client_rec->tracking_info.recordID.pid, ADDRESS_FREE_LIST);
2457 if(adr_rec != NULL)
2458 seq_printf(s, "%10s\n","<ADDRESS_FREE_LIST_IN_USERSPACE>");
2459 while (!!adr_rec)
2460 {
2461 seq_printf(s, "%10s [%d] - %10s [0x%x]-[0x%x] %10s [%d]\n",
2462 "Process", adr_rec->tracking_info.recordID.pid,
2463 "Address", adr_rec->mapping_address,(adr_rec->mapping_address+adr_rec->size),
2464 "Size", adr_rec->size);
2465 /* Show address release backtrace */
2466 ion_debugdb_show_backtrace(s, &adr_rec->tracking_info, RELEASE_BACKTRACE_INFO);
2467 /* Next address record */
2468 adr_rec = (struct ion_address_usage_record *)ion_get_data_from_record((void *)adr_rec, RECORD_NEXT);
2469 }
2470
2471 }
2472 mutex_unlock(&client->lock);
2473 }
2474 }
2475 }
2476 break;
2477 case DBCL_FD:
2478 /* Lv1 - all buffers
2479 * Lv2 - all buffer-fds
2480 */
2481 printk(KERN_INFO "DBCL_FD\n");
2482 {
2483 struct ion_fd_usage_record *fd_rec;
2484 struct ion_client_usage_record *client_rec;
2485
2486 /* Find matched clients */
2487 for (cn = rb_first(&dev->clients); cn; cn = rb_next(cn)) {
2488 client = rb_entry(cn, struct ion_client, node);
2489 /* Matched clients */
2490 if (client->pid == raw_key) {
2491 mutex_lock(&client->lock);
2492 /* Lv1 - all buffers */
2493 client_rec = (struct ion_client_usage_record *)ion_get_client_record(client);
2495 if (client_rec != NULL) {
2498 fd_rec = ion_get_list_from_process(client_rec->tracking_info.recordID.pid, FD_ALLOCATION_LIST);
2499 if (fd_rec != NULL)
2500 seq_printf(s, "%10s\n", "<FD_ALLOCATION_LIST>");
2502 while (fd_rec) {
2503 seq_printf(s, "%10s [%d] %10s [%d]\n",
2504 "Process", fd_rec->tracking_info.recordID.pid,
2505 "in-use fd", fd_rec->fd);
2506 /* Show address allocation backtrace */
2507 ion_debugdb_show_backtrace(s, &fd_rec->tracking_info, ALLOCATE_BACKTRACE_INFO);
2508 /* Next address record */
2509 fd_rec = (struct ion_fd_usage_record *)ion_get_data_from_record((void *)fd_rec, RECORD_NEXT);
2510 }
2511 #if 0
2512 fd_rec = ion_get_list_from_process(client_rec->tracking_info.recordID.pid, FD_FREE_LIST);
2513 if(fd_rec != NULL)
2514 seq_printf(s, "%10s\n","<FD_FREE_LIST>");
2515 while (!!fd_rec) {
2516 seq_printf(s, "%10s [%d] %10s [%d]\n",
2517 "Process", fd_rec->tracking_info.recordID.pid,
2518 "freed fd", fd_rec->fd);
2519 /* Show address release backtrace */
2520 ion_debugdb_show_backtrace(s, &fd_rec->tracking_info, RELEASE_BACKTRACE_INFO);
2521 /* Next address record */
2522 fd_rec = (struct ion_address_usage_record *)ion_get_data_from_record((void *)fd_rec, RECORD_NEXT);
2523 }
2524 #endif
2525 }
2526 mutex_unlock(&client->lock);
2527 }
2528 }
2529 }
2530 break;
2531 default:
2532 break;
2533 }
2534
2535 return 0;
2536}
2537
2538static int ion_debug_dbcl_open(struct inode *inode, struct file *file)
2539{
2540 return single_open(file, ion_debug_dbcl_show, inode->i_private);
2541}
2542
2543
2544static const struct file_operations debug_dbcl_fops = {
2545 .open = ion_debug_dbcl_open,
2546 .read = seq_read,
2547 .llseek = seq_lseek,
2548 .release = single_release,
2549};
2550
2551static int ion_debug_dbis_show(struct seq_file *s, void *unused)
2552{
2553 unsigned long type = (unsigned long)s->private;
2554 unsigned long ori_type = type;
2555 struct ion_device *dev = g_ion_device;
2556 struct rb_node *cn, *hn;
2557 struct ion_client *client;
2558 struct ion_handle *handle;
2559 struct ion_buffer *buffer;
2560 int client_cnt = 0, buffer_cnt = 0,process_cnt = 0;
2561
2562 struct ion_buffer_record *buf_rec = NULL;
2563 struct ion_process_record *process_rec = NULL;
2564 struct ion_client_usage_record *client_rec = NULL;
2565
2566 /* History records */
2567 if (type >= (unsigned long)DBIS_DIR) {
2568 printk(KERN_INFO "ION Debug History Records\n");
2569
2570 type -= (unsigned long)DBIS_DIR;
2571 switch ((enum dbis_types)type)
2572 {
2573 case DBIS_CLIENTS:
2574 {
2575 client_rec = ion_get_freed_client_record();
2576 break;
2577 }
2578 case DBIS_BUFFERS:
2579 {
2580 buf_rec = ion_get_freed_buffer_record();
2581 break;
2582 }
2583 case DBIS_MMAPS:
2584 {
2585 buf_rec = ion_get_freed_buffer_record();
2586 }
/* fall through: DBIS_MMAPS also needs the freed process records */
2587 case DBIS_FDS:
2588 {
2589 process_rec = ion_get_freed_process_record();
2590 break;
2591 }
2592 case DBIS_PIDS:
2593 {
2594 client_rec = ion_get_inuse_client_record();
2595 process_rec = ion_get_inuse_process_usage_record2();
2596 break;
2597 }
2598 case _TOTAL_DBIS:
2599 case DBIS_FILE:
2600 case DBIS_DIR:
2601 {
2602 break;
2603 }
2604
2605 }
2606
2607 } else {
2608 printk(KERN_INFO "ION Debug Non-History Records\n");
2609 switch ((enum dbis_types)type)
2610 {
2611 case DBIS_CLIENTS:
2612 {
2613 client_rec = ion_get_inuse_client_record();
2614 break;
2615 }
2616 case DBIS_BUFFERS:
2617 {
2618 buf_rec = ion_get_inuse_buffer_record();
2619 break;
2620 }
2621 case DBIS_MMAPS:
2622 {
2623 buf_rec = ion_get_inuse_buffer_record();
2624 }
/* fall through: DBIS_MMAPS also needs the in-use process records */
2625 case DBIS_FDS:
2626 {
2627 process_rec = ion_get_inuse_process_usage_record2();
2628 break;
2629 }
2630 case DBIS_PIDS:
2631 {
2632 client_rec = ion_get_inuse_client_record();
2633 process_rec = ion_get_inuse_process_usage_record2();
2634 break;
2635 }
2636 case _TOTAL_DBIS:
2637 case DBIS_FILE:
2638 case DBIS_DIR:
2639 {
2640 break;
2641 }
2642
2643 }
2644 }
2645
2646 /* Non-history records */
2647 switch ((enum dbis_types)type) {
2648 case DBIS_CLIENTS:
2649 printk(KERN_INFO "DBIS_CLIENTS\n");
2650 {
2651#if 0
2652 /* All clients */
2653 for (cn = rb_first(&dev->clients); cn; cn = rb_next(cn)) {
2654 client = rb_entry(cn, struct ion_client, node);
2655 seq_printf(s, "\n%8s[%2d] 0x%p PID[%d]\n", "client", client_cnt++, client, client->pid);
2656 mutex_lock(&client->lock);
2657 /* All client-handles */
2658 for (hn = rb_first(&client->handles); hn; hn = rb_next(hn)) {
2659 handle = rb_entry(hn, struct ion_handle, node);
2660 seq_printf(s, "%10s[%2d] kmap_cnt(%d)\n", "handle", buffer_cnt, handle->kmap_cnt);
2661 /* All client-handle-buffers */
2662 buffer = handle->buffer;
2663 mutex_lock(&buffer->lock);
2664 seq_printf(s, "%10s[%2d] heap(%s) address(0x%x) flags(%d) size(%d) kmap_cnt(%d) kvaddr(0x%x)\n",
2665 "buffer", buffer_cnt++, buffer->heap->name,buffer,buffer->flags,
2666 buffer->size, buffer->kmap_cnt, buffer->vaddr);
2667 mutex_unlock(&buffer->lock);
2668 }
2669 mutex_unlock(&client->lock);
2670 buffer_cnt = 0;
2671 }
2672#endif
2673 client_cnt = 0;
2674 while (client_rec != NULL) {
2676 seq_printf(s, "\n[%2d]%s: fd[%d] 0x%p PID[%d] GROUP_PID[%d]\n", client_cnt++, "client", client_rec->fd, client_rec->tracking_info.recordID.client, client_rec->tracking_info.recordID.pid, client_rec->tracking_info.recordID.group_pid);
2677 /* Show buffer allocation backtrace */
2678 seq_printf(s, " %s\n","<CLIENT_ALLOCATION_LIST>");
2679 ion_debugdb_show_backtrace(s, &client_rec->tracking_info,ALLOCATE_BACKTRACE_INFO);
2680 if (ori_type >= (unsigned long)DBIS_DIR)
2681 {
2682 seq_printf(s, " %s\n","<CLIENT_FREE_LIST>");
2683 ion_debugdb_show_backtrace(s, &client_rec->tracking_info,RELEASE_BACKTRACE_INFO);
2684 }
2685 client_rec = (struct ion_client_usage_record *)client_rec->next;
2686 }
2687 }
2688 break;
2689 case DBIS_BUFFERS:
2690 printk(KERN_INFO "DBIS_BUFFERS\n");
2691 {
2692 struct ion_buffer_usage_record *usg_rec;
2693
2694#if 0
2695 buf_rec = ion_get_inuse_buffer_record();
2696#endif
2697 while (buf_rec != NULL)
2698 {
2699 seq_printf(s, "%8s[%2d][0x%x] buffer structure: 0x%p size(%d)\n", "buffer", buffer_cnt++,(unsigned int)buf_rec->buffer,buf_rec->buffer_address, buf_rec->buffer->size);
2700 /* Allocation */
2701 usg_rec = ion_get_list(LIST_BUFFER,buf_rec, BUFFER_ALLOCATION_LIST);
2702 if(usg_rec)
2703 {
2704 seq_printf(s, "%30s\n","<BUFFER_ALLOCATION_LIST>");
2705 }
2706 while (!!usg_rec)
2707 {
2708 if(usg_rec->function_type == ION_FUNCTION_ALLOC)
2709 {
2710 seq_printf(s, "%15s [%d] (%s [%d]) %s (0x%x) FUNCTION %s\n","Process", usg_rec->tracking_info.recordID.pid,
2711 "GroupLeader", usg_rec->tracking_info.recordID.group_pid,"handle",(unsigned int)usg_rec->handle,"ION_ALLOC");
2712 }else
2713 {
2714 seq_printf(s, "%15s [%d] (%s [%d]) %s (0x%x) FUNCTION %s\n","Process", usg_rec->tracking_info.recordID.pid,
2715 "GroupLeader", usg_rec->tracking_info.recordID.group_pid,"handle",(unsigned int)usg_rec->handle,"ION_IMPORT");
2716 }
2717 /* Show buffer allocation backtrace */
2718 ion_debugdb_show_backtrace(s, &usg_rec->tracking_info, ALLOCATE_BACKTRACE_INFO);
2719 /* Next buffer usage record */
2720 usg_rec = (struct ion_buffer_usage_record *)ion_get_data_from_record((void *)usg_rec, RECORD_NEXT);
2721 }
2722#if 0
2723 /* Free */
2724 seq_printf(s, "%30s\n","<BUFFER_FREE_LIST>");
2725
2726 usg_rec = ion_get_list(LIST_BUFFER,buf_rec, BUFFER_FREE_LIST);
2727 while (!!usg_rec) {
2728 seq_printf(s, "%15s [%d] (%15s [%d])\n","Process", usg_rec->tracking_info.recordID.pid,
2729 "GroupLeader", usg_rec->tracking_info.recordID.group_pid);
2730 /* Show buffer free backtrace */
2731 ion_debugdb_show_backtrace(s, &usg_rec->tracking_info, RELEASE_BACKTRACE_INFO);
2732 /* Next buffer usage record */
2733 usg_rec = (struct ion_buffer_usage_record *)ion_get_data_from_record((void *)usg_rec, RECORD_NEXT);
2734 }
2735#endif
2736 /* Next record */
2737 buf_rec = buf_rec->next;
2738 }
2739 }
2740 break;
2741 case DBIS_MMAPS:
2742 printk(KERN_INFO "DBIS_MMAPS\n");
2743 {
2744 struct ion_address_usage_record *adr_rec = NULL;
2745 struct ion_address_usage_record *adr_rec_free = NULL;
2746 struct ion_address_usage_record *adr_rec_user = NULL;
2747 struct ion_address_usage_record *adr_rec_user_free = NULL;
2748 seq_printf(s, "%8s\n","<USERSPACE MAPPING>");
2749 while (process_rec != NULL) {
2750 /* USER MMAP */
2751 adr_rec_user = ion_get_list(LIST_PROCESS,process_rec, ADDRESS_ALLOCATION_LIST);
2752 adr_rec_user_free = ion_get_list(LIST_PROCESS,process_rec, ADDRESS_FREE_LIST);
2753 if (adr_rec_user == NULL && adr_rec_user_free == NULL) {
2755 process_rec = process_rec->next;
2756 continue;
2757 }
2760 seq_printf(s, "[%2d]%8s[0x%x] [%d] group_id [%d]\n",process_cnt++,"process",(unsigned int)process_rec, process_rec->pid, process_rec->group_id);
2761 if(adr_rec_user != NULL)
2762 {
2763 seq_printf(s, " %s\n","<ADDRESS_ALLOCATION_LIST>");
2764 }
2765 else
2766 {
2767 seq_printf(s, " %s\n","<NO ADDRESS_ALLOCATION_LIST>");
2768 }
2769
2770 while (!!adr_rec_user) {
2771 seq_printf(s, " %s[0x%x] [%d] - %s [0x%x] - [0x%x] %10s [%d]\n",
2772 "Process",(unsigned int)process_rec, adr_rec_user->tracking_info.recordID.pid,
2773 "Address", adr_rec_user->mapping_address,(adr_rec_user->mapping_address + adr_rec_user->size),
2774 "Size", adr_rec_user->size);
2775
2776 /* Show fd allocation backtrace */
2777 ion_debugdb_show_backtrace(s, &adr_rec_user->tracking_info, ALLOCATE_BACKTRACE_INFO);
2778 /* Next fd record */
2779 adr_rec_user = (struct ion_address_usage_record *)ion_get_data_from_record((void *)adr_rec_user, RECORD_NEXT);
2780 }
2781
2782 if(adr_rec_user_free != NULL)
2783 {
2784 seq_printf(s, " %s\n","<ADDRESS_FREE_LIST>");
2785 }
2786 else
2787 {
2788 seq_printf(s, " %s\n","<NO_ADDRESS_FREE_LIST>");
2789 }
2790
2791 while (!!adr_rec_user_free) {
2792 seq_printf(s, " %s[0x%x] [%d] - %s [0x%x] - [0x%x]%10s [%d]\n",
2793 "Process",(unsigned int)process_rec, adr_rec_user_free->tracking_info.recordID.pid,
2794 "Address", adr_rec_user_free->mapping_address,(adr_rec_user_free->mapping_address + adr_rec_user_free->size),
2795 "Size", adr_rec_user_free->size);
2796
2797 /* Show fd release backtrace */
2798 ion_debugdb_show_backtrace(s, &adr_rec_user_free->tracking_info, RELEASE_BACKTRACE_INFO);
2799 /* Next fd record */
2800 adr_rec_user_free = (struct ion_address_usage_record *)ion_get_data_from_record((void *)adr_rec_user_free, RECORD_NEXT);
2801 }
2802 /* Next record */
2803 process_rec = process_rec->next;
2804 }
2805 seq_printf(s, "%s\n","<KENREL MAPPING>");
2806 mutex_lock(&buffer_lifecycle_mutex);
2807 while (buf_rec != NULL) {
2808 mutex_lock(&buf_rec->ion_address_usage_mutex);
2809 /* Mapping */
2810 adr_rec = ion_get_list(LIST_BUFFER,buf_rec, ADDRESS_ALLOCATION_LIST);
2811
2812 /* Unmapping */
2813 adr_rec_free = ion_get_list(LIST_BUFFER,buf_rec, ADDRESS_FREE_LIST);
2814 mutex_unlock(&buf_rec->ion_address_usage_mutex);
2815 if (adr_rec == NULL && adr_rec_free == NULL) {
2817 buf_rec = buf_rec->next;
2818 continue;
2819 }
2820 
2821 seq_printf(s, "%8s[%2d] size(%zu) %12p\n", "buffer", buffer_cnt++, buf_rec->buffer->size, buf_rec->buffer);
2822 if(adr_rec != NULL)
2823 {
2824 seq_printf(s, " %s\n","<ADDRESS_ALLOCATION_LIST>");
2825 }
2826
2827 while (adr_rec) {
2828 seq_printf(s, "%8s [%d] - %20s [0x%x] - [0x%x] %10s [%d]\n",
2829 "Process", adr_rec->tracking_info.recordID.pid,
2830 "Address", adr_rec->mapping_address, (adr_rec->mapping_address + adr_rec->size),
2831 "Size", adr_rec->size);
2832 /* Show address allocation backtrace */
2833 ion_debugdb_show_backtrace(s, &adr_rec->tracking_info, ALLOCATE_BACKTRACE_INFO);
2834 /* Next address record */
2835 adr_rec = (struct ion_address_usage_record *)ion_get_data_from_record((void *)adr_rec, RECORD_NEXT);
2836 }
2837 if(adr_rec_free != NULL)
2838 {
2839 seq_printf(s, " %s\n","<ADDRESS_FREE_LIST>");
2840 }
2841
2842 while (!!adr_rec_free) {
2843 seq_printf(s, "%8s [%d] - %20s [0x%x] - [0x%x] %10s [%d]\n",
2844 "Process", adr_rec_free->tracking_info.recordID.pid,
2845 "Address", adr_rec_free->mapping_address,(adr_rec_free->mapping_address + adr_rec_free->size),
2846 "Size", adr_rec_free->size);
2847 /* Show address release backtrace */
2848 ion_debugdb_show_backtrace(s, &adr_rec_free->tracking_info, RELEASE_BACKTRACE_INFO);
2849 /* Next address record */
2850 adr_rec_free = (struct ion_address_usage_record *)ion_get_data_from_record((void *)adr_rec_free, RECORD_NEXT);
2851 }
2852 adr_rec = NULL;
2853 adr_rec_free = NULL;
2854 /* Next record */
2855 buf_rec = buf_rec->next;
2856 }
2857 mutex_unlock(&buffer_lifecycle_mutex);
2858 }
2859 break;
2860 case DBIS_FDS:
2861 printk(KERN_INFO "DBIS_FDS\n");
2862 {
2863 struct ion_fd_usage_record *fd_rec;
2864 while (process_rec != NULL) {
2865 /* FD */
2866 fd_rec = ion_get_list(LIST_PROCESS,process_rec, FD_ALLOCATION_LIST);
2867 //fd_rec2 = ion_get_list(LIST_PROCESS,process_rec, FD_FREE_LIST);
2868 if(fd_rec == NULL)
2869 {
2870 process_rec = process_rec->next;
2871 continue;
2872 }
2873 seq_printf(s, "[%2d] %8s[0x%x] [%d] group_id [%d]\n",process_cnt++, "process",(unsigned int)process_rec, process_rec->pid,process_rec->group_id);
2874 if(fd_rec != NULL)
2875 {
2876 seq_printf(s, " %s\n","<FD_ALLOCATION_LIST>");
2877 }
2878 else
2879 {
2880 seq_printf(s, " %s\n","<NO_FD_ALLOCATION_LIST>");
2881 }
2882
2883 while (fd_rec) {
2884 seq_printf(s, " %8s[0x%x] [%d] - %8s [%d]\n",
2885 "Process", (unsigned int)process_rec, fd_rec->tracking_info.recordID.pid,
2886 "in-use fd", fd_rec->fd);
2887 /* Show fd allocation backtrace */
2888 ion_debugdb_show_backtrace(s, &fd_rec->tracking_info, ALLOCATE_BACKTRACE_INFO);
2889 /* Next fd record */
2890 fd_rec = (struct ion_fd_usage_record *)ion_get_data_from_record((void *)fd_rec, RECORD_NEXT);
2891 }
2892 #if 0
2893 if(fd_rec2 != NULL)
2894 {
2895 seq_printf(s, " %s\n","<FD_FREE_LIST>");
2896 }
2897
2898 while (!!fd_rec2) {
2899 seq_printf(s, "%7s[0x%x] [%d] - %6s [%d]\n",
2900 "Process",process_rec, fd_rec2->tracking_info.recordID.pid,
2901 "freed Fd", fd_rec2->fd);
2902 /* Show fd release backtrace */
2903 ion_debugdb_show_backtrace(s, &fd_rec2->tracking_info, RELEASE_BACKTRACE_INFO);
2904 /* Next fd record */
2905 fd_rec2 = (struct ion_fd_usage_record *)ion_get_data_from_record((void *)fd_rec2, RECORD_NEXT);
2906 }
2907 #endif
2908 /* Next record */
2909 process_rec = process_rec->next;
2910 }
2911 }
2912 break;
2913 case DBIS_PIDS:
2914 printk(KERN_INFO "DBIS_PIDS\n");
2915 {
2916 struct dbis_process_entry proclist = {.pid = -1, .clients = NULL, .next = NULL};
2917 struct dbis_process_entry *pe = NULL;
2918 struct dbis_client_entry *ce = NULL;
2919 struct ion_process_record *current_process_rec = NULL;
2920 struct ion_client_usage_record *current_client_rec = NULL;
2921 struct ion_fd_usage_record *current_fd_usage_rec = NULL;
2922 struct ion_address_usage_record *current_mmap_usage_rec = NULL;
2923 process_rec = ion_get_inuse_process_usage_record2();
2924 /* Firstly, we should go through all clients. */
2925 for (cn = rb_first(&dev->clients); cn; cn = rb_next(cn)) {
2926 client = rb_entry(cn, struct ion_client, node);
2927 dbis_insert_proc_clients(&proclist, client, client->pid);
2928 }
2929
2930 /* Now we can go through all processes using ION. */
2931 pe = proclist.next;
2932
2933 while (pe != NULL) {
2934 seq_printf(s, "%s[%d]\n","Process", pe->pid);
2935 current_process_rec = process_rec;
2936 while (current_process_rec != NULL) {
2937 if (current_process_rec->pid == pe->pid) {
2939 printk("found process pid %d in record\n", current_process_rec->pid);
2940 break;
2941 }
2942 current_process_rec = current_process_rec->next;
2943 }
2944 if (current_process_rec == NULL) {
2946 seq_printf(s, "ERROR: cannot find process pid %d in record\n", pe->pid);
2947 printk("ERROR: cannot find process pid %d in record\n", pe->pid);
2948 break;
2949 }
2950 /* Go through all clients for this pe */
2951 ce = pe->clients;
2952
2953 while (ce != NULL) {
2954 client = ce->client;
2955 current_client_rec = (struct ion_client_usage_record *)client_rec;
2956 while(current_client_rec != NULL)
2957 {
2958 if (current_client_rec->tracking_info.recordID.client_address == (unsigned int)client &&
current_client_rec->tracking_info.recordID.pid == pe->pid) {
2960 printk("found client address 0x%x\n", current_client_rec->tracking_info.recordID.client_address);
2961 break;
2962 }
2963 current_client_rec = (struct ion_client_usage_record *)current_client_rec->next;
2964 }
2965 /* Show all client information */
2966 if(current_client_rec != NULL)
2967 {
2968 seq_printf(s, "\n%8s[%2d] %12p fd[%d]\n", "client", client_cnt++, client,current_client_rec->fd);
2969 }
2970 else
2971 {
2972 seq_printf(s, "\n%8s[%2d] %12p\n", "client", client_cnt++, client);
2973 }
2974 mutex_lock(&client->lock);
2975 /* All client-handles */
2976 for (hn = rb_first(&client->handles); hn; hn = rb_next(hn)) {
2977 handle = rb_entry(hn, struct ion_handle, node);
2978 seq_printf(s, "%10s[%2d](0x%x) ref_count %d kmap_cnt(%d)\n", "handle", buffer_cnt,(unsigned int)handle,atomic_read(&handle->ref.refcount),handle->kmap_cnt);
2979 /* All client-handle-buffers */
2980 buffer = handle->buffer;
2981 current_fd_usage_rec = current_process_rec->fd_using_list;
2982 while(current_fd_usage_rec != NULL)
2983 {
2984 if((current_fd_usage_rec->buffer == buffer) && (current_fd_usage_rec->handle == handle))
2985 {
2986 break;
2987 }
2988 current_fd_usage_rec = (struct ion_fd_usage_record *)current_fd_usage_rec->next;
2989 }
2990 mutex_lock(&buffer->lock);
2991 if(current_fd_usage_rec != NULL)
2992 {
2993 seq_printf(s, "%14s[%2d] fd(%d) heap(%s) ref_count(%d)flags(%d) buffer(0x%x) addr(0x%x) size(%d) \n",
2994 "--buffer", buffer_cnt++,current_fd_usage_rec->fd, buffer->heap->name,(int)atomic_read(&buffer->ref.refcount),(int)buffer->flags,
2995 (unsigned int)buffer,(unsigned int)buffer->vaddr,(int)buffer->size );
2996 }
2997 else
2998 {
2999 seq_printf(s, "%14s[%2d] heap(%s) flags(%d) buffer (0x%x) addr(0x%x) size(%d) kmap_cnt(%d) kvaddr(0x%x)\n",
3000 "--buffer", buffer_cnt++,buffer->heap->name, (int)buffer->flags,
3001 (unsigned int)buffer,(unsigned int)buffer->vaddr ,(int)buffer->size,(int)buffer->kmap_cnt, (unsigned int)buffer->vaddr);
3002 }
3003 mutex_unlock(&buffer->lock);
3004 current_mmap_usage_rec = current_process_rec->address_using_list;
3005 while(current_mmap_usage_rec != NULL)
3006 {
3007 if(current_mmap_usage_rec->buffer == buffer)
3008 {
3009 seq_printf(s,"%16s mapping address[0x%x - 0x%x] size(%d)\n","----buffer",current_mmap_usage_rec->mapping_address,current_mmap_usage_rec->mapping_address+current_mmap_usage_rec->size,current_mmap_usage_rec->size);
3010 }
3011 current_mmap_usage_rec = current_mmap_usage_rec->next;
3012 }
3013 }
3014 mutex_unlock(&client->lock);
3015 buffer_cnt = 0;
3016
3017 /* Next ce */
3018 ce = ce->next;
3019 }
3020 /* Next pe */
3021 pe = pe->next;
3022 }
3023
3024 /* Finally, delete all entries in proclist */
3025 destroy_proclist(&proclist);
3026 }
3027 break;
3028#if 0
3029 case DBIS_MODS:
3030 printk(KERN_INFO "DBIS_MODS\n");
3031 break;
3032#endif
3033 default:
3034 break;
3035 }
3036
3037
3038 return 0;
3039}
3040
3041static int ion_debug_dbis_open(struct inode *inode, struct file *file)
3042{
3043 return single_open(file, ion_debug_dbis_show, inode->i_private);
3044}
3045
3046static const struct file_operations debug_dbis_fops = {
3047 .open = ion_debug_dbis_open,
3048 .read = seq_read,
3049 .llseek = seq_lseek,
3050 .release = single_release,
3051};
3052
3053static void ion_debug_create_db(struct dentry *root)
3054{
3055 int index;
3056
3057 /* Create checking_leakage folder */
3058 debug_db_root.checking_leakage = debugfs_create_dir("checking_leakage", root);
3059 INIT_LIST_HEAD(&debug_db_root.dbcl.child);
3060
3061 /* Create ion_statistics folder & its children */
3062 debug_db_root.ion_statistics = debugfs_create_dir("ion_statistics", root);
3063 for (index = 0; index <= _TOTAL_DBIS; ++index)
3064 {
3065 if (dbis_child_attr[index].attr == DBIS_FILE)
3066 {
3067 debug_db_root.dbis.child[index]
3068 = debugfs_create_file(dbis_child_attr[index].name, 0444, debug_db_root.ion_statistics,(void *)index, &debug_dbis_fops);
3069 }
3070 else
3071 {/* This is only for history now. */
3072 debug_db_root.dbis.child[index] = debugfs_create_dir(dbis_child_attr[index].name, debug_db_root.ion_statistics);
3073#if 0
3074 for (his_index = 0; his_index < _TOTAL_DBIS; ++his_index) {
3075 debug_db_root.dbis.history_record[his_index]
3076 = debugfs_create_file(dbis_child_attr[index+his_index+1].name, 0444, debug_db_root.dbis.child[index], his_index+index+1, &debug_dbis_fops);
3077 }
3078#endif
3079 /* client - Use (DBIS_CLIENTS + DBIS_DIR) to identify history/clients */
3080 debug_db_root.dbis.history_record[0]
3081 = debugfs_create_file(dbis_child_attr[DBIS_CLIENTS].name, 0444,
3082 debug_db_root.dbis.child[index], (void *)(DBIS_CLIENTS + DBIS_DIR), &debug_dbis_fops);
3083
3084 /* buffers - Use (DBIS_BUFFERS + DBIS_DIR) to identify history/buffers */
3085 debug_db_root.dbis.history_record[1]
3086 = debugfs_create_file(dbis_child_attr[DBIS_BUFFERS].name, 0444,
3087 debug_db_root.dbis.child[index], (void *)(DBIS_BUFFERS + DBIS_DIR), &debug_dbis_fops);
3088 /* mmaps - Use (DBIS_MMAPS + DBIS_DIR) to identify history/mmaps */
3089 debug_db_root.dbis.history_record[2]
3090 = debugfs_create_file(dbis_child_attr[DBIS_MMAPS].name, 0444,
3091 debug_db_root.dbis.child[index], (void *)(DBIS_MMAPS + DBIS_DIR), &debug_dbis_fops);
3092 /* fds - Use (DBIS_fdS + DBIS_DIR) to identify history/fds */
3093 debug_db_root.dbis.history_record[3]
3094 = debugfs_create_file(dbis_child_attr[DBIS_FDS].name, 0444,
3095 debug_db_root.dbis.child[index], (void *)(DBIS_FDS + DBIS_DIR), &debug_dbis_fops);
3096 /* pids - Use (DBIS_PIDS + DBIS_DIR) to identify history/pids */
3097 debug_db_root.dbis.history_record[4]
3098 = debugfs_create_file(dbis_child_attr[DBIS_PIDS].name, 0444,
3099 debug_db_root.dbis.child[index], (void *)(DBIS_PIDS + DBIS_DIR), &debug_dbis_fops);
3100 }
3101 }
3102}
3103
3104static void ion_debug_db_create_clentry(pid_t pid)
3105{
3106 struct list_head *pos, *n;
3107 struct dbcl_child *found;
3108 char process_id[6];
3109 int index;
3110
3111 /* Check whether pid is in the cl list*/
3112 list_for_each_safe(pos, n, &debug_db_root.dbcl.child) {
3113 found = list_entry(pos, struct dbcl_child, entry);
3114 if ((pid_t)found->raw_key == pid) {
3115 /* We have found one. */
3116 atomic_inc(&found->refcount);
3117 return;
3118 }
3119 }
3120
3121 /* No existing entry */
3122 found = kmalloc(sizeof(struct dbcl_child), GFP_KERNEL);
if (!found)
return;
3123 found->raw_key = (void *)pid;
3124 snprintf(process_id, 6, "%d", pid);
3125 found->root = debugfs_create_dir(process_id, debug_db_root.checking_leakage);
3126 for (index = 0; index < _TOTAL_DBCL; ++index) {
3127 found->type[index] = debugfs_create_file(dbcl_child_name[index], 0444, found->root, (void *)((index << 16) | pid), &debug_dbcl_fops);
3128 }
3129 atomic_set(&found->refcount, 1);
3130 list_add_tail(&found->entry, &debug_db_root.dbcl.child);
3131}
3132
3133static void ion_debug_db_destroy_clentry(pid_t pid)
3134{
3135 struct list_head *pos, *n;
3136 struct dbcl_child *found;
3137
3138 /* Check whether pid is in the cl list*/
3139 list_for_each_safe(pos, n, &debug_db_root.dbcl.child) {
3140 found = list_entry(pos, struct dbcl_child, entry);
3141 if ((pid_t)found->raw_key == pid) {
3142 /* We have found one. */
3143 if (atomic_dec_and_test(&found->refcount)) {
3144 /* Delete list entry, remove corresponding debugfs dir/files, free memory. */
3145 list_del(&found->entry);
3146 debugfs_remove_recursive(found->root);
3147 kfree(found);
3148 }
3149 return;
3150 }
3151 }
3152
3153 printk(KERN_DEBUG "%s: no entry found for pid %d\n", __func__, pid);
3154}
3155#endif
3156