Choose this option to enable HPA heaps with Ion. This heap is backed
by the High-order Pages Allocator (HPA). If your system allocates
buffers through the HPA heap, say Y here.
+
+config ION_DEBUG_EVENT_RECORD
+ bool "Ion event debug support"
+ default y
+ depends on ION_EXYNOS
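+ help
+   Record the latest events on Ion buffers (allocation, free, mmap,
+   kmap, DMA-buf map/unmap, CPU access begin/end and IOVMM mapping)
+   with timestamps in a ring buffer and expose them through
+   debugfs/ion/event.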
#include "ion.h"
#include "ion_exynos.h"
+#include "ion_debug.h"
static struct ion_device *internal_dev;
static int heap_id;
void ion_buffer_destroy(struct ion_buffer *buffer)
{
+ ion_event_begin();
+
exynos_ion_free_fixup(buffer);
if (buffer->kmap_cnt > 0) {
pr_warn_once("%s: buffer still mapped in the kernel\n",
__func__);
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
}
buffer->heap->ops->free(buffer);
+
+ ion_event_end(ION_EVENT_TYPE_FREE, buffer);
+
kfree(buffer);
}
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
void *vaddr;
+ ion_event_begin();
+
if (buffer->kmap_cnt) {
if (buffer->kmap_cnt == INT_MAX)
return ERR_PTR(-EOVERFLOW);
buffer->kmap_cnt++;
return buffer->vaddr;
}
buffer->vaddr = vaddr;
buffer->kmap_cnt++;
+
+ ion_event_end(ION_EVENT_TYPE_KMAP, buffer);
+
return vaddr;
}
struct ion_buffer *buffer = dmabuf->priv;
int ret = 0;
+ ion_event_begin();
+
if (!buffer->heap->ops->map_user) {
perrfn("this heap does not define a method for mapping to userspace");
return -EINVAL;
}
if (ret)
perrfn("failure mapping buffer to userspace");
+ ion_event_end(ION_EVENT_TYPE_MMAP, buffer);
+
return ret;
}
char expname[ION_EXPNAME_LEN];
struct dma_buf *dmabuf;
+ ion_event_begin();
+
pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
len, heap_id_mask, flags);
/*
_ion_buffer_destroy(buffer);
}
+ ion_event_end(ION_EVENT_TYPE_ALLOC, buffer);
+
return dmabuf;
}
#include "ion.h"
#include "ion_exynos.h"
+#include "ion_debug.h"
+
+#define ION_MAX_EVENT_LOG 1024
+#define ION_EVENT_CLAMP_ID(id) ((id) & (ION_MAX_EVENT_LOG - 1))
+
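+/*
+ * eventid only ever grows; ION_EVENT_CLAMP_ID() masks it onto the ring
+ * below, which is why ION_MAX_EVENT_LOG must stay a power of two.
+ */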
+static atomic_t eventid;
+
+static const char * const ion_event_name[] = {
+ "alloc",
+ "free",
+ "mmap",
+ "kmap",
+ "map_dma_buf",
+ "unmap_dma_buf",
+ "begin_cpu_access",
+ "end_cpu_access",
+ "iovmm_map",
+};
+
+static struct ion_event {
+ struct ion_heap *heap;
+ unsigned long data;
+ ktime_t begin;
+ ktime_t done;
+ size_t size;
+ enum ion_event_type type;
+ int buffer_id;
+} eventlog[ION_MAX_EVENT_LOG];
+
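+/* print a human-readable form of the recorded ION_FLAG_* bits */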
+static void ion_buffer_dump_flags(struct seq_file *s, unsigned long flags)
+{
+ if (flags & ION_FLAG_CACHED)
+ seq_puts(s, "cached");
+ else
+ seq_puts(s, "noncached");
+
+ if (flags & ION_FLAG_NOZEROED)
+ seq_puts(s, "|nozeroed");
+
+ if (flags & ION_FLAG_PROTECTED)
+ seq_puts(s, "|protected");
+
+ if (flags & ION_FLAG_MAY_HWRENDER)
+ seq_puts(s, "|may_hwrender");
+}
+
+#ifdef CONFIG_ION_DEBUG_EVENT_RECORD
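+/*
+ * Record one event in the ring buffer. Slot selection is a single atomic
+ * increment, so recording never blocks. For a FREE event ->data notes
+ * whether the shrinker freed the buffer; for every other event it holds
+ * the buffer flags.
+ */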
+void ion_event_record(enum ion_event_type type,
+ struct ion_buffer *buffer, ktime_t begin)
+{
+ int idx = ION_EVENT_CLAMP_ID(atomic_inc_return(&eventid));
+ struct ion_event *event = &eventlog[idx];
+
+ event->buffer_id = buffer->id;
+ event->type = type;
+ event->begin = begin;
+ event->done = ktime_get();
+ event->heap = buffer->heap;
+ event->size = buffer->size;
+ event->data = (type == ION_EVENT_TYPE_FREE) ?
+ buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE :
+ buffer->flags;
+}
+#endif
+
+static bool ion_debug_event_show_single(struct seq_file *s,
+ struct ion_event *event)
+{
+ struct timeval tv = ktime_to_timeval(event->begin);
+ long elapsed = ktime_us_delta(event->done, event->begin);
+
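+ /* a slot that has never been written is all zeroes, giving a zero delta */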
+ if (elapsed == 0)
+ return false;
+
+ seq_printf(s, "[%06ld.%06ld] ", tv.tv_sec, tv.tv_usec);
+ seq_printf(s, "%17s %18s %10d %13zd %10ld",
+ ion_event_name[event->type], event->heap->name,
+ event->buffer_id, event->size / SZ_1K, elapsed);
+
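+ /* flag events that took longer than 100 msec */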
+ if (elapsed > 100 * USEC_PER_MSEC)
+ seq_puts(s, " *");
+
+ switch (event->type) {
+ case ION_EVENT_TYPE_ALLOC:
+ seq_puts(s, " ");
+ ion_buffer_dump_flags(s, event->data);
+ break;
+ case ION_EVENT_TYPE_FREE:
+ if (event->data)
+ seq_puts(s, " shrinker");
+ break;
+ default:
+ break;
+ }
+
+ seq_puts(s, "\n");
+
+ return true;
+}
+
+static int ion_debug_event_show(struct seq_file *s, void *unused)
+{
+ int i;
+ int idx = ION_EVENT_CLAMP_ID(atomic_read(&eventid) + 1);
+
+ seq_printf(s, "%15s %17s %18s %10s %13s %10s %24s\n",
+ "timestamp", "type", "heap", "buffer_id", "size (kb)",
+ "time (us)", "remarks");
+ seq_puts(s, "-------------------------------------------");
+ seq_puts(s, "-------------------------------------------");
+ seq_puts(s, "-----------------------------------------\n");
+
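+ /*
+  * The slot after the most recently written entry is the oldest one:
+  * dump from there to the end of the ring, then wrap to the front,
+  * stopping at the first slot that was never written.
+  */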
+ for (i = idx; i < ION_MAX_EVENT_LOG; i++)
+ if (!ion_debug_event_show_single(s, &eventlog[i]))
+ break;
+
+ for (i = 0; i < idx; i++)
+ if (!ion_debug_event_show_single(s, &eventlog[i]))
+ break;
+
+ return 0;
+}
+
+static int ion_debug_event_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ion_debug_event_show, inode->i_private);
+}
+
+static const struct file_operations debug_event_fops = {
+ .open = ion_debug_event_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
#define ION_MAX_LOGBUF 128
void ion_debug_initialize(struct ion_device *idev)
{
- struct dentry *buffer_file;
+ struct dentry *buffer_file, *event_file;
buffer_file = debugfs_create_file("buffers", 0444, idev->debug_root,
idev, &debug_buffers_fops);
if (!buffer_file)
perrfn("failed to create debugfs/ion/buffers");
+ event_file = debugfs_create_file("event", 0444, idev->debug_root,
+ idev, &debug_event_fops);
+ if (!event_file)
+ perrfn("failed to create debugfs/ion/event");
+
ion_oom_notifier.idev = idev;
register_oom_notifier(&ion_oom_notifier.nb);
+
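+ /* start at -1 so the first atomic_inc_return() in ion_event_record() hits slot 0 */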
+ atomic_set(&eventid, -1);
}
--- /dev/null
+++ b/drivers/staging/android/ion/ion_debug.h
+/*
+ * drivers/staging/android/ion/ion_debug.h
+ *
+ * Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ION_DEBUG_H_
+#define _ION_DEBUG_H_
+
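+/* the order here must match ion_event_name[] in ion_debug.c */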
+enum ion_event_type {
+ ION_EVENT_TYPE_ALLOC = 0,
+ ION_EVENT_TYPE_FREE,
+ ION_EVENT_TYPE_MMAP,
+ ION_EVENT_TYPE_KMAP,
+ ION_EVENT_TYPE_MAP_DMA_BUF,
+ ION_EVENT_TYPE_UNMAP_DMA_BUF,
+ ION_EVENT_TYPE_BEGIN_CPU_ACCESS,
+ ION_EVENT_TYPE_END_CPU_ACCESS,
+ ION_EVENT_TYPE_IOVMM_MAP,
+};
+
+#ifdef CONFIG_ION_DEBUG_EVENT_RECORD
+void ion_event_record(enum ion_event_type type,
+ struct ion_buffer *buffer, ktime_t begin);
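+
+/*
+ * ion_event_begin() opens a local ktime_t that ion_event_end() consumes,
+ * so it must sit at the end of the local variable declarations of the
+ * traced function. Both macros compile away when
+ * CONFIG_ION_DEBUG_EVENT_RECORD is disabled.
+ */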
+#define ion_event_begin() ktime_t begin = ktime_get()
+#define ion_event_end(type, buffer) ion_event_record(type, buffer, begin)
+#else
+#define ion_event_record(type, buffer, begin) do { } while (0)
+#define ion_event_begin() do { } while (0)
+#define ion_event_end(type, buffer) do { } while (0)
+#endif
+#endif /* _ION_DEBUG_H_ */
#include "ion.h"
#include "ion_exynos.h"
+#include "ion_debug.h"
struct dma_buf *ion_alloc_dmabuf(const char *heap_name,
size_t len, unsigned int flags)
struct ion_iovm_map *iovm_map;
struct iommu_domain *domain;
+ ion_event_begin();
+
BUG_ON(attachment->dmabuf->ops != &ion_dma_buf_ops);
if (IS_ENABLED(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION) &&
mutex_unlock(&buffer->lock);
+ ion_event_end(ION_EVENT_TYPE_IOVMM_MAP, buffer);
+
return iovm_map->iova;
}
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction direction)
{
struct ion_buffer *buffer = attachment->dmabuf->priv;
- if (ion_buffer_cached(buffer) && direction != DMA_NONE)
+ if (ion_buffer_cached(buffer) && direction != DMA_NONE) {
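+ /* only the cache maintenance path is timed; uncached buffers skip the sync */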
+ ion_event_begin();
+
dma_sync_sg_for_device(attachment->dev, buffer->sg_table->sgl,
buffer->sg_table->nents, direction);
+ ion_event_end(ION_EVENT_TYPE_MAP_DMA_BUF, buffer);
+ }
+
return buffer->sg_table;
}
static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
struct sg_table *table,
enum dma_data_direction direction)
{
struct ion_buffer *buffer = attachment->dmabuf->priv;
- if (ion_buffer_cached(buffer) && direction != DMA_NONE)
+ if (ion_buffer_cached(buffer) && direction != DMA_NONE) {
+ ion_event_begin();
+
dma_sync_sg_for_cpu(attachment->dev, table->sgl,
table->nents, direction);
+
+ ion_event_end(ION_EVENT_TYPE_UNMAP_DMA_BUF, buffer);
+ }
}
static void exynos_flush_sg(struct device *dev,
struct ion_buffer *buffer = dmabuf->priv;
void *vaddr;
+ ion_event_begin();
+
if (buffer->heap->ops->map_kernel) {
mutex_lock(&buffer->lock);
vaddr = ion_buffer_kmap_get(buffer);
mutex_unlock(&buffer->lock);
}
+ ion_event_end(ION_EVENT_TYPE_BEGIN_CPU_ACCESS, buffer);
+
return 0;
}
static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction)
{
struct ion_buffer *buffer = dmabuf->priv;
+ ion_event_begin();
+
if (buffer->heap->ops->map_kernel) {
mutex_lock(&buffer->lock);
ion_buffer_kmap_put(buffer);
mutex_unlock(&buffer->lock);
}
+ ion_event_end(ION_EVENT_TYPE_END_CPU_ACCESS, buffer);
+
return 0;
}