[RAMEN9610-12171][COMMON] ion: add event log on debugfs
author: hyesoo.yu <hyesoo.yu@samsung.com>
Fri, 1 Jun 2018 09:36:58 +0000 (18:36 +0900)
committer: Cosmin Tanislav <demonsingur@gmail.com>
Mon, 22 Apr 2024 17:23:16 +0000 (20:23 +0300)
timestamp        type     heap            buffer_id  size       time   remarks
------------------------------------------------------------------------
[000113.120867]    alloc  ion_system_heap     1     32400        178   noncached
[000113.121074]     mmap  ion_system_heap     1     32400        521
[000113.121153]     free  ion_system_heap     1     32400      31555
[000113.526191]    alloc  ion_system_heap     2     32400        177   cached
[000113.526398]     mmap  ion_system_heap     2     32400        472

Show events for ion buffers — alloc, mmap, free, cache maintenance,
and System MMU (IOVMM) mapping — each with a timestamp, keeping up to
1024 records in a ring buffer.

Change-Id: Ia0a4c69e7509c2d4ae035ca8fa8ab90354708d9c
Signed-off-by: hyesoo.yu <hyesoo.yu@samsung.com>
drivers/staging/android/ion/Kconfig
drivers/staging/android/ion/ion.c
drivers/staging/android/ion/ion_debug.c
drivers/staging/android/ion/ion_debug.h [new file with mode: 0644]
drivers/staging/android/ion/ion_exynos.c

index cb620d3a8641b15660fb32f5e02a9310a268cf58..b335720d15fd9eac24350967e635a10d35e2927d 100644 (file)
@@ -64,3 +64,8 @@ config ION_HPA_HEAP
          Choose this option to enable HPA heaps with Ion. This heap is backed
          by the High-order Pages Allocator (HPA). If your system needs HPA for
          some reason, you should say Y here.
+
+config ION_DEBUG_EVENT_RECORD
+       bool "Ion event debug support"
+       default y
+       depends on ION_EXYNOS
index 134932e202e0e4cfc332420771736e4619a6c3eb..1303db4a34bd7536e17f88dfc10d175ced10d3cc 100644 (file)
@@ -40,6 +40,7 @@
 
 #include "ion.h"
 #include "ion_exynos.h"
+#include "ion_debug.h"
 
 static struct ion_device *internal_dev;
 static int heap_id;
@@ -134,6 +135,8 @@ err2:
 
 void ion_buffer_destroy(struct ion_buffer *buffer)
 {
+       ion_event_begin();
+
        exynos_ion_free_fixup(buffer);
        if (buffer->kmap_cnt > 0) {
                pr_warn_once("%s: buffer still mapped in the kernel\n",
@@ -141,6 +144,9 @@ void ion_buffer_destroy(struct ion_buffer *buffer)
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
        }
        buffer->heap->ops->free(buffer);
+
+       ion_event_end(ION_EVENT_TYPE_FREE, buffer);
+
        kfree(buffer);
 }
 
@@ -163,6 +169,8 @@ void *ion_buffer_kmap_get(struct ion_buffer *buffer)
 {
        void *vaddr;
 
+       ion_event_begin();
+
        if (buffer->kmap_cnt) {
                if (buffer->kmap_cnt == INT_MAX)
                        return ERR_PTR(-EOVERFLOW);
@@ -181,6 +189,9 @@ void *ion_buffer_kmap_get(struct ion_buffer *buffer)
        }
        buffer->vaddr = vaddr;
        buffer->kmap_cnt++;
+
+       ion_event_end(ION_EVENT_TYPE_KMAP, buffer);
+
        return vaddr;
 }
 
@@ -282,6 +293,8 @@ static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
        struct ion_buffer *buffer = dmabuf->priv;
        int ret = 0;
 
+       ion_event_begin();
+
        if (!buffer->heap->ops->map_user) {
                perrfn("this heap does not define a method for mapping to userspace");
                return -EINVAL;
@@ -308,6 +321,8 @@ static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
        if (ret)
                perrfn("failure mapping buffer to userspace");
 
+       ion_event_end(ION_EVENT_TYPE_MMAP, buffer);
+
        return ret;
 }
 
@@ -449,6 +464,8 @@ struct dma_buf *__ion_alloc(size_t len, unsigned int heap_id_mask,
        char expname[ION_EXPNAME_LEN];
        struct dma_buf *dmabuf;
 
+       ion_event_begin();
+
        pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
                 len, heap_id_mask, flags);
        /*
@@ -498,6 +515,8 @@ struct dma_buf *__ion_alloc(size_t len, unsigned int heap_id_mask,
                _ion_buffer_destroy(buffer);
        }
 
+       ion_event_end(ION_EVENT_TYPE_ALLOC, buffer);
+
        return dmabuf;
 }
 
index 71562ba956987ac3b39815172184186738bc9977..f2e97d717525af15983e17d3c67bbc4c39e96ec0 100644 (file)
 
 #include "ion.h"
 #include "ion_exynos.h"
+#include "ion_debug.h"
+
+#define ION_MAX_EVENT_LOG      1024
+#define ION_EVENT_CLAMP_ID(id) ((id) & (ION_MAX_EVENT_LOG - 1))
+
+static atomic_t eventid;
+
+static char * const ion_event_name[] = {
+       "alloc",
+       "free",
+       "mmap",
+       "kmap",
+       "map_dma_buf",
+       "unmap_dma_buf",
+       "begin_cpu_access",
+       "end_cpu_access",
+       "iovmm_map",
+};
+
+static struct ion_event {
+       struct ion_heap *heap;
+       unsigned long data;
+       ktime_t begin;
+       ktime_t done;
+       size_t size;
+       enum ion_event_type type;
+       int buffer_id;
+} eventlog[ION_MAX_EVENT_LOG];
+
+static void ion_buffer_dump_flags(struct seq_file *s, unsigned long flags)
+{
+       if (flags & ION_FLAG_CACHED)
+               seq_puts(s, "cached");
+       else
+               seq_puts(s, "noncached");
+
+       if (flags & ION_FLAG_NOZEROED)
+               seq_puts(s, "|nozeroed");
+
+       if (flags & ION_FLAG_PROTECTED)
+               seq_puts(s, "|protected");
+
+       if (flags & ION_FLAG_MAY_HWRENDER)
+               seq_puts(s, "|may_hwrender");
+}
+
+#ifdef CONFIG_ION_DEBUG_EVENT_RECORD
+void ion_event_record(enum ion_event_type type,
+                     struct ion_buffer *buffer, ktime_t begin)
+{
+       int idx = ION_EVENT_CLAMP_ID(atomic_inc_return(&eventid));
+       struct ion_event *event = &eventlog[idx];
+
+       event->buffer_id = buffer->id;
+       event->type = type;
+       event->begin = begin;
+       event->done = ktime_get();
+       event->heap = buffer->heap;
+       event->size = buffer->size;
+       event->data = (type == ION_EVENT_TYPE_FREE) ?
+               buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE :
+               buffer->flags;
+}
+#endif
+
+inline bool ion_debug_event_show_single(struct seq_file *s,
+                               struct ion_event *event)
+{
+       struct timeval tv = ktime_to_timeval(event->begin);
+       long elapsed = ktime_us_delta(event->done, event->begin);
+
+       if (elapsed == 0)
+               return false;
+
+       seq_printf(s, "[%06ld.%06ld] ", tv.tv_sec, tv.tv_usec);
+       seq_printf(s, "%17s %18s %10d %13zd %10ld",
+                  ion_event_name[event->type], event->heap->name,
+                  event->buffer_id, event->size / SZ_1K, elapsed);
+
+       if (elapsed > 100 * USEC_PER_MSEC)
+               seq_puts(s, " *");
+
+       switch (event->type) {
+       case ION_EVENT_TYPE_ALLOC:
+               seq_puts(s, "  ");
+               ion_buffer_dump_flags(s, event->data);
+               break;
+       case ION_EVENT_TYPE_FREE:
+               if (event->data)
+                       seq_puts(s, " shrinker");
+               break;
+       default:
+               break;
+       }
+
+       seq_puts(s, "\n");
+
+       return true;
+}
+
+static int ion_debug_event_show(struct seq_file *s, void *unused)
+{
+       int i;
+       int idx = ION_EVENT_CLAMP_ID(atomic_read(&eventid) + 1);
+
+       seq_printf(s, "%15s %17s %18s %10s %13s %10s %24s\n",
+                  "timestamp", "type", "heap", "buffer_id", "size (kb)",
+                  "time (us)", "remarks");
+       seq_puts(s, "-------------------------------------------");
+       seq_puts(s, "-------------------------------------------");
+       seq_puts(s, "-----------------------------------------\n");
+
+       for (i = idx; i < ION_MAX_EVENT_LOG; i++)
+               if (!ion_debug_event_show_single(s, &eventlog[i]))
+                       break;
+
+       for (i = 0; i < idx; i++)
+               if (!ion_debug_event_show_single(s, &eventlog[i]))
+                       break;
+
+       return 0;
+}
+
+static int ion_debug_event_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, ion_debug_event_show, inode->i_private);
+}
+
+static const struct file_operations debug_event_fops = {
+       .open = ion_debug_event_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
 
 #define ION_MAX_LOGBUF 128
 
@@ -191,13 +325,20 @@ static struct ion_oom_notifier_struct ion_oom_notifier = {
 
 void ion_debug_initialize(struct ion_device *idev)
 {
-       struct dentry *buffer_file;
+       struct dentry *buffer_file, *event_file;
 
        buffer_file = debugfs_create_file("buffers", 0444, idev->debug_root,
                                          idev, &debug_buffers_fops);
        if (!buffer_file)
                perrfn("failed to create debugfs/ion/buffers");
 
+       event_file = debugfs_create_file("event", 0444, idev->debug_root,
+                                        idev, &debug_event_fops);
+       if (!event_file)
+               pr_err("%s: failed to create debugfs/ion/event\n", __func__);
+
        ion_oom_notifier.idev = idev;
        register_oom_notifier(&ion_oom_notifier.nb);
+
+       atomic_set(&eventid, -1);
 }
diff --git a/drivers/staging/android/ion/ion_debug.h b/drivers/staging/android/ion/ion_debug.h
new file mode 100644 (file)
index 0000000..d756a70
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * drivers/staging/android/ion/ion_debug.h
+ *
+ * Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ION_DEBUG_H_
+#define _ION_DEBUG_H_
+
+enum ion_event_type {
+       ION_EVENT_TYPE_ALLOC = 0,
+       ION_EVENT_TYPE_FREE,
+       ION_EVENT_TYPE_MMAP,
+       ION_EVENT_TYPE_KMAP,
+       ION_EVENT_TYPE_MAP_DMA_BUF,
+       ION_EVENT_TYPE_UNMAP_DMA_BUF,
+       ION_EVENT_TYPE_BEGIN_CPU_ACCESS,
+       ION_EVENT_TYPE_END_CPU_ACCESS,
+       ION_EVENT_TYPE_IOVMM_MAP,
+};
+
+#ifdef CONFIG_ION_DEBUG_EVENT_RECORD
+void ion_event_record(enum ion_event_type type,
+                     struct ion_buffer *buffer, ktime_t begin);
+#define ion_event_begin()      ktime_t begin = ktime_get()
+#define ion_event_end(type, buffer)    ion_event_record(type, buffer, begin)
+#else
+#define ion_event_record(type, buffer, begin)  do { } while (0)
+#define ion_event_begin()      do { } while (0)
+#define ion_event_end(type, buffer)    do { } while (0)
+#endif
+#endif /* _ION_DEBUG_H_ */
index 4bab048f03181dcc5865809035bcf4299b48bac7..a56855cc4794196e9d5de09a22b313b80a4d9519 100644 (file)
@@ -21,6 +21,7 @@
 
 #include "ion.h"
 #include "ion_exynos.h"
+#include "ion_debug.h"
 
 struct dma_buf *ion_alloc_dmabuf(const char *heap_name,
                                 size_t len, unsigned int flags)
@@ -85,6 +86,8 @@ dma_addr_t ion_iovmm_map(struct dma_buf_attachment *attachment,
        struct ion_iovm_map *iovm_map;
        struct iommu_domain *domain;
 
+       ion_event_begin();
+
        BUG_ON(attachment->dmabuf->ops != &ion_dma_buf_ops);
 
        if (IS_ENABLED(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION) &&
@@ -126,6 +129,8 @@ dma_addr_t ion_iovmm_map(struct dma_buf_attachment *attachment,
 
        mutex_unlock(&buffer->lock);
 
+       ion_event_end(ION_EVENT_TYPE_IOVMM_MAP, buffer);
+
        return iovm_map->iova;
 }
 
@@ -249,10 +254,15 @@ struct sg_table *ion_exynos_map_dma_buf(struct dma_buf_attachment *attachment,
 {
        struct ion_buffer *buffer = attachment->dmabuf->priv;
 
-       if (ion_buffer_cached(buffer) && direction != DMA_NONE)
+       if (ion_buffer_cached(buffer) && direction != DMA_NONE) {
+               ion_event_begin();
+
                dma_sync_sg_for_device(attachment->dev, buffer->sg_table->sgl,
                                       buffer->sg_table->nents, direction);
 
+               ion_event_end(ION_EVENT_TYPE_MAP_DMA_BUF, buffer);
+       }
+
        return buffer->sg_table;
 }
 
@@ -262,9 +272,14 @@ void ion_exynos_unmap_dma_buf(struct dma_buf_attachment *attachment,
 {
        struct ion_buffer *buffer = attachment->dmabuf->priv;
 
-       if (ion_buffer_cached(buffer) && direction != DMA_NONE)
+       if (ion_buffer_cached(buffer) && direction != DMA_NONE) {
+               ion_event_begin();
+
                dma_sync_sg_for_cpu(attachment->dev, table->sgl,
                                    table->nents, direction);
+
+               ion_event_end(ION_EVENT_TYPE_UNMAP_DMA_BUF, buffer);
+       }
 }
 
 static void exynos_flush_sg(struct device *dev,
@@ -287,6 +302,8 @@ int ion_exynos_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
        struct ion_buffer *buffer = dmabuf->priv;
        void *vaddr;
 
+       ion_event_begin();
+
        if (buffer->heap->ops->map_kernel) {
                mutex_lock(&buffer->lock);
                vaddr = ion_buffer_kmap_get(buffer);
@@ -309,6 +326,8 @@ int ion_exynos_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
        }
        mutex_unlock(&buffer->lock);
 
+       ion_event_end(ION_EVENT_TYPE_BEGIN_CPU_ACCESS, buffer);
+
        return 0;
 }
 
@@ -317,6 +336,8 @@ int ion_exynos_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 {
        struct ion_buffer *buffer = dmabuf->priv;
 
+       ion_event_begin();
+
        if (buffer->heap->ops->map_kernel) {
                mutex_lock(&buffer->lock);
                ion_buffer_kmap_put(buffer);
@@ -339,5 +360,7 @@ int ion_exynos_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
        }
        mutex_unlock(&buffer->lock);
 
+       ion_event_end(ION_EVENT_TYPE_END_CPU_ACCESS, buffer);
+
        return 0;
 }