obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
obj-$(CONFIG_EXYNOS_IOVMM) += exynos-iovmm.o
-obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
+obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o exynos-iommu-log.o
obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o
--- /dev/null
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Event log and debugfs support for the Exynos IOMMU driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+
+#include "exynos-iommu-log.h"
+
+int exynos_iommu_init_event_log(struct exynos_iommu_event_log *log,
+ unsigned int log_len)
+{
+ struct page *page;
+ int i, order;
+ size_t fit_size = PAGE_ALIGN(sizeof(*(log->log)) * log_len);
+ int fit_pages = fit_size / PAGE_SIZE;
+
+ /* log_len must be a power of 2 */
+ BUG_ON((log_len - 1) & log_len);
+
+ atomic_set(&log->index, 0);
+ order = get_order(fit_size);
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!page)
+ return -ENOMEM;
+
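+ /*
+ * The allocation is rounded up to a power-of-2 number of pages;
+ * split it into order-0 pages so the tail pages beyond fit_pages
+ * can be freed back individually below.
+ */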
+ split_page(page, order);
+
+ if ((1 << order) > fit_pages) {
+ int extra = (1 << order) - fit_pages;
+
+ for (i = 0; i < extra; i++)
+ __free_pages(page + fit_pages + i, 0);
+ }
+
+ log->log = page_address(page);
+ log->log_len = log_len;
+
+ return 0;
+}
+
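+/* Keep this table in sync with enum sysmmu_event_log_event */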
+static const char * const sysmmu_event_name[] = {
+ "n/a", /* not an event */
+ "ENABLE",
+ "DISABLE",
+ "TLB_INV_RANGE",
+ "TLB_INV_ALL",
+ "POWERON",
+ "POWEROFF",
+ "IOMMU_ATTACH",
+ "IOMMU_DETACH",
+ "IOMMU_MAP",
+ "IOMMU_UNMAP",
+ "IOMMU_ALLOCSLPD",
+ "IOMMU_FREESLPD",
+ "IOVMM_MAP",
+ "IOVMM_UNMAP"
+};
+
+static void exynos_iommu_debug_log_show(struct seq_file *s,
+ struct sysmmu_event_log *log)
+{
+ struct timeval tv = ktime_to_timeval(log->timestamp);
+
+ if (log->event == EVENT_SYSMMU_NONE)
+ return;
+
+ seq_printf(s, "%06ld.%06ld: %15s", tv.tv_sec, tv.tv_usec,
+ sysmmu_event_name[log->event]);
+
+ switch (log->event) {
+ case EVENT_SYSMMU_ENABLE:
+ case EVENT_SYSMMU_DISABLE:
+ case EVENT_SYSMMU_TLB_INV_ALL:
+ case EVENT_SYSMMU_POWERON:
+ case EVENT_SYSMMU_POWEROFF:
+ seq_puts(s, "\n");
+ break;
+ case EVENT_SYSMMU_IOMMU_ALLOCSLPD:
+ case EVENT_SYSMMU_IOMMU_FREESLPD:
+ seq_printf(s, " @ %#010x\n", log->eventdata.addr);
+ break;
+ case EVENT_SYSMMU_TLB_INV_RANGE:
+ case EVENT_SYSMMU_IOMMU_UNMAP:
+ case EVENT_SYSMMU_IOVMM_UNMAP:
+ seq_printf(s, " @ [%#010x, %#010x)\n",
+ log->eventdata.range.start,
+ log->eventdata.range.end);
+ break;
+ case EVENT_SYSMMU_IOVMM_MAP:
+ seq_printf(s, " [%#010x, %#010x(+%#x))\n",
+ log->eventdata.iovmm.start,
+ log->eventdata.iovmm.end,
+ log->eventdata.iovmm.dummy);
+ break;
+ case EVENT_SYSMMU_IOMMU_MAP:
+ seq_printf(s, " [%#010x, %#010x) for PFN %#x\n",
+ log->eventdata.iommu.start,
+ log->eventdata.iommu.end,
+ log->eventdata.iommu.pfn);
+ break;
+ case EVENT_SYSMMU_IOMMU_ATTACH:
+ case EVENT_SYSMMU_IOMMU_DETACH:
+ seq_printf(s, " of %s\n", dev_name(log->eventdata.dev));
+ break;
+ default:
+ BUG();
+ }
+}
+
+static int exynos_iommu_debugfs_log_show(struct seq_file *s, void *unused)
+{
+ struct exynos_iommu_event_log *plog = s->private;
+ unsigned int index = atomic_read(&plog->index) % plog->log_len;
+ unsigned int begin = index;
+
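+ /*
+ * index points at the next slot to be claimed, i.e. the oldest entry
+ * once the ring has wrapped; a single full pass dumps the log in
+ * chronological order, skipping unused EVENT_SYSMMU_NONE slots.
+ */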
+ do {
+ exynos_iommu_debug_log_show(s, &plog->log[index++]);
+ if (index == plog->log_len)
+ index = 0;
+ } while (index != begin);
+
+ return 0;
+}
+
+static int exynos_iommu_debugfs_log_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, exynos_iommu_debugfs_log_show,
+ inode->i_private);
+}
+
+#define SYSMMU_DENTRY_LOG_ROOT_NAME "eventlog"
+static struct dentry *sysmmu_debugfs_log_root;
+static struct dentry *iommu_debugfs_log_root;
+
+static const struct file_operations exynos_iommu_debugfs_fops = {
+ .open = exynos_iommu_debugfs_log_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void __sysmmu_add_log_to_debugfs(struct dentry *debugfs_root,
+ struct dentry **debugfs_eventlog_root,
+ struct exynos_iommu_event_log *log, const char *name)
+{
+ if (!debugfs_root)
+ return;
+
+ if (!(*debugfs_eventlog_root)) {
+ *debugfs_eventlog_root = debugfs_create_dir(
+ SYSMMU_DENTRY_LOG_ROOT_NAME, debugfs_root);
+ if (!(*debugfs_eventlog_root)) {
+ pr_err("%s: Failed to create 'eventlog' entry\n",
+ __func__);
+ return;
+ }
+ }
+
+ log->debugfs_root = debugfs_create_file(name, 0400,
+ *debugfs_eventlog_root, log,
+ &exynos_iommu_debugfs_fops);
+ if (!log->debugfs_root)
+ pr_err("%s: Failed to create '%s' entry of 'eventlog'\n",
+ __func__, name);
+}
+
+void sysmmu_add_log_to_debugfs(struct dentry *debugfs_root,
+ struct exynos_iommu_event_log *log, const char *name)
+{
+ __sysmmu_add_log_to_debugfs(debugfs_root, &sysmmu_debugfs_log_root,
+ log, name);
+}
+
+void iommu_add_log_to_debugfs(struct dentry *debugfs_root,
+ struct exynos_iommu_event_log *log, const char *name)
+{
+ __sysmmu_add_log_to_debugfs(debugfs_root, &iommu_debugfs_log_root,
+ log, name);
+}
+
+#if defined(CONFIG_EXYNOS_IOVMM)
+static struct dentry *iovmm_debugfs_log_root;
+
+void iovmm_add_log_to_debugfs(struct dentry *debugfs_root,
+ struct exynos_iommu_event_log *log, const char *name)
+{
+ __sysmmu_add_log_to_debugfs(debugfs_root, &iovmm_debugfs_log_root,
+ log, name);
+}
+#endif
--- /dev/null
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Event log data structure definitions for the Exynos IOMMU driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _EXYNOS_IOMMU_LOG_H_
+#define _EXYNOS_IOMMU_LOG_H_
+
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
+#include <linux/gfp.h>
+#include <linux/vmalloc.h>
+#include <linux/device.h>
+
+enum sysmmu_event_log_event {
+ EVENT_SYSMMU_NONE, /* initialized value */
+ EVENT_SYSMMU_ENABLE,
+ EVENT_SYSMMU_DISABLE,
+ EVENT_SYSMMU_TLB_INV_RANGE,
+ EVENT_SYSMMU_TLB_INV_ALL,
+ EVENT_SYSMMU_POWERON,
+ EVENT_SYSMMU_POWEROFF,
+ EVENT_SYSMMU_IOMMU_ATTACH,
+ EVENT_SYSMMU_IOMMU_DETACH,
+ EVENT_SYSMMU_IOMMU_MAP,
+ EVENT_SYSMMU_IOMMU_UNMAP,
+ EVENT_SYSMMU_IOMMU_ALLOCSLPD,
+ EVENT_SYSMMU_IOMMU_FREESLPD,
+ EVENT_SYSMMU_IOVMM_MAP,
+ EVENT_SYSMMU_IOVMM_UNMAP
+};
+
+struct sysmmu_event_range {
+ u32 start;
+ u32 end;
+};
+
+struct sysmmu_event_IOMMU_MAP {
+ u32 start;
+ u32 end;
+ unsigned int pfn;
+};
+
+struct sysmmu_event_IOVMM_MAP {
+ u32 start;
+ u32 end;
+ unsigned int dummy; /* extra bytes reserved beyond the requested mapping size */
+};
+
+/*
+ * eventdata must be updated before event because of eventdata.dev.
+ *
+ * sysmmu_event_log is not protected by any lock, so a race between an
+ * update and a concurrent read may leave an entry transiently
+ * inconsistent. That is harmless except when the event is IOMMU_ATTACH
+ * or IOMMU_DETACH: those events store a pointer to a device descriptor
+ * in eventdata.dev, and reading such an entry from debugfs dereferences
+ * that pointer. Writing eventdata before event ensures a reader never
+ * dereferences a stale or invalid pointer.
+ */
+struct sysmmu_event_log {
+ ktime_t timestamp;
+ union {
+ struct sysmmu_event_range range;
+ struct sysmmu_event_IOMMU_MAP iommu;
+ struct sysmmu_event_IOVMM_MAP iovmm;
+ u32 addr;
+ struct device *dev;
+ } eventdata;
+ enum sysmmu_event_log_event event;
+};
+
+struct exynos_iommu_event_log {
+ atomic_t index;
+ unsigned int log_len;
+ struct sysmmu_event_log *log;
+ struct dentry *debugfs_root;
+};
+
+/* sizeof(struct sysmmu_event_log) = 8 + 4 * 3 + 4 = 24 bytes (32-bit) */
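+/* Per-instance event log lengths; each must be a power of 2 */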
+#define SYSMMU_LOG_LEN 1024
+#define IOMMU_LOG_LEN 4096
+#define IOVMM_LOG_LEN 512
+
+#define SYSMMU_DRVDATA_TO_LOG(data) (&(data)->log)
+#define IOMMU_PRIV_TO_LOG(data) (&(data)->log)
+#define IOMMU_TO_LOG(data) (&(to_exynos_domain(data))->log)
+#define IOVMM_TO_LOG(data) (&(data)->log)
+
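+/*
+ * Claim the next slot in the ring and timestamp it. Callers fill in
+ * eventdata first and set ->event last (see the comment above
+ * struct sysmmu_event_log).
+ */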
+static inline struct sysmmu_event_log *sysmmu_event_log_get(
+ struct exynos_iommu_event_log *plog)
+{
+ struct sysmmu_event_log *log;
+ unsigned int index =
+ (unsigned int)atomic_inc_return(&plog->index) - 1;
+ log = &plog->log[index % plog->log_len];
+ log->timestamp = ktime_get();
+ return log;
+}
+
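+/*
+ * The DEFINE_SYSMMU_EVENT_LOG* macros below generate the
+ * SYSMMU_EVENT_LOG_<event>() helpers for events that carry no payload,
+ * a single address, or an address range, respectively.
+ */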
+#define DEFINE_SYSMMU_EVENT_LOG(evt) \
+static inline void SYSMMU_EVENT_LOG_##evt(struct exynos_iommu_event_log *plog) \
+{ \
+ struct sysmmu_event_log *log = sysmmu_event_log_get(plog); \
+ log->event = EVENT_SYSMMU_##evt; \
+}
+
+#define DEFINE_SYSMMU_EVENT_LOG_1ADDR(evt) \
+static inline void SYSMMU_EVENT_LOG_##evt( \
+ struct exynos_iommu_event_log *plog, u32 addr) \
+{ \
+ struct sysmmu_event_log *log = sysmmu_event_log_get(plog); \
+ log->eventdata.addr = addr; \
+ log->event = EVENT_SYSMMU_##evt; \
+}
+
+#define DEFINE_SYSMMU_EVENT_LOG_2ADDR(evt) \
+static inline void SYSMMU_EVENT_LOG_##evt(struct exynos_iommu_event_log *plog, \
+ u32 start, u32 end) \
+{ \
+ struct sysmmu_event_log *log = sysmmu_event_log_get(plog); \
+ log->eventdata.range.start = start; \
+ log->eventdata.range.end = end; \
+ log->event = EVENT_SYSMMU_##evt; \
+}
+
+static inline void SYSMMU_EVENT_LOG_IOVMM_MAP(
+ struct exynos_iommu_event_log *plog,
+ u32 start, u32 end, unsigned int dummy)
+{
+ struct sysmmu_event_log *log = sysmmu_event_log_get(plog);
+ log->eventdata.iovmm.start = start;
+ log->eventdata.iovmm.end = end;
+ log->eventdata.iovmm.dummy = dummy;
+ log->event = EVENT_SYSMMU_IOVMM_MAP;
+}
+
+static inline void SYSMMU_EVENT_LOG_IOMMU_ATTACH(
+ struct exynos_iommu_event_log *plog, struct device *dev)
+{
+ struct sysmmu_event_log *log = sysmmu_event_log_get(plog);
+ log->eventdata.dev = dev;
+ log->event = EVENT_SYSMMU_IOMMU_ATTACH;
+}
+
+static inline void SYSMMU_EVENT_LOG_IOMMU_DETACH(
+ struct exynos_iommu_event_log *plog, struct device *dev)
+{
+ struct sysmmu_event_log *log = sysmmu_event_log_get(plog);
+ log->eventdata.dev = dev;
+ log->event = EVENT_SYSMMU_IOMMU_DETACH;
+}
+
+static inline void SYSMMU_EVENT_LOG_IOMMU_MAP(
+ struct exynos_iommu_event_log *plog,
+ u32 start, u32 end, unsigned int pfn)
+{
+ struct sysmmu_event_log *log = sysmmu_event_log_get(plog);
+ log->eventdata.iommu.start = start;
+ log->eventdata.iommu.end = end;
+ log->eventdata.iommu.pfn = pfn;
+ log->event = EVENT_SYSMMU_IOMMU_MAP;
+}
+
+int exynos_iommu_init_event_log(struct exynos_iommu_event_log *log,
+ unsigned int log_len);
+
+void sysmmu_add_log_to_debugfs(struct dentry *debugfs_root,
+ struct exynos_iommu_event_log *log, const char *name);
+
+void iommu_add_log_to_debugfs(struct dentry *debugfs_root,
+ struct exynos_iommu_event_log *log, const char *name);
+
+#if defined(CONFIG_EXYNOS_IOVMM)
+void iovmm_add_log_to_debugfs(struct dentry *debugfs_root,
+ struct exynos_iommu_event_log *log, const char *name);
+#else
+#define iovmm_add_log_to_debugfs(debugfs_root, log, name) do { } while (0)
+#endif
+
+DEFINE_SYSMMU_EVENT_LOG(ENABLE)
+DEFINE_SYSMMU_EVENT_LOG(DISABLE)
+DEFINE_SYSMMU_EVENT_LOG(TLB_INV_ALL)
+DEFINE_SYSMMU_EVENT_LOG(POWERON)
+DEFINE_SYSMMU_EVENT_LOG(POWEROFF)
+
+DEFINE_SYSMMU_EVENT_LOG_1ADDR(IOMMU_ALLOCSLPD)
+DEFINE_SYSMMU_EVENT_LOG_1ADDR(IOMMU_FREESLPD)
+
+DEFINE_SYSMMU_EVENT_LOG_2ADDR(TLB_INV_RANGE)
+DEFINE_SYSMMU_EVENT_LOG_2ADDR(IOMMU_UNMAP)
+DEFINE_SYSMMU_EVENT_LOG_2ADDR(IOVMM_UNMAP)
+
+#endif /*_EXYNOS_IOMMU_LOG_H_*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
static struct sysmmu_drvdata *sysmmu_drvdata_list;
static struct exynos_iommu_owner *sysmmu_owner_list;
-static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
-{
- return container_of(dom, struct exynos_iommu_domain, domain);
-}
-
struct sysmmu_list_data {
struct device *sysmmu;
struct list_head node;
struct notifier_block nb;
};
+static struct dentry *exynos_sysmmu_debugfs_root;
+
int exynos_client_add(struct device_node *np, struct exynos_iovmm *vmm_data)
{
struct exynos_client *client = kzalloc(sizeof(*client), GFP_KERNEL);
return 0;
}
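+/*
+ * Translate a first-level (section) entry pointer back into the IOVA of
+ * the section it maps: the entry index multiplied by SECT_SIZE.
+ */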
+static int iova_from_sent(sysmmu_pte_t *base, sysmmu_pte_t *sent)
+{
+ return ((unsigned long)sent - (unsigned long)base) *
+ (SECT_SIZE / sizeof(sysmmu_pte_t));
+}
+
#define has_sysmmu(dev) ((dev)->archdata.iommu != NULL)
/* For ARM64 only */
__raw_writel(iova, sfrbase + REG_FLUSH_RANGE_START);
__raw_writel(size - 1 + iova, sfrbase + REG_FLUSH_RANGE_END);
writel(0x1, sfrbase + REG_MMU_FLUSH_RANGE);
+ SYSMMU_EVENT_LOG_TLB_INV_RANGE(SYSMMU_DRVDATA_TO_LOG(drvdata),
+ iova, iova + size);
}
-static void __sysmmu_set_ptbase(void __iomem *sfrbase, phys_addr_t pfn_pgtable)
+static void __sysmmu_set_ptbase(struct sysmmu_drvdata *drvdata,
+ phys_addr_t pfn_pgtable)
{
+ void __iomem *sfrbase = drvdata->sfrbase;
+
writel_relaxed(pfn_pgtable, sfrbase + REG_PT_BASE_PPN);
__sysmmu_tlb_invalidate_all(sfrbase);
+ SYSMMU_EVENT_LOG_TLB_INV_ALL(SYSMMU_DRVDATA_TO_LOG(drvdata));
}
void exynos_sysmmu_tlb_invalidate(struct iommu_domain *iommu_domain,
pm_runtime_enable(dev);
+ ret = exynos_iommu_init_event_log(SYSMMU_DRVDATA_TO_LOG(data),
+ SYSMMU_LOG_LEN);
+ if (ret)
+ return ret;
+
+ sysmmu_add_log_to_debugfs(exynos_sysmmu_debugfs_root,
+ SYSMMU_DRVDATA_TO_LOG(data), dev_name(dev));
+
ret = sysmmu_get_hw_info(data);
if (ret) {
dev_err(dev, "Failed to get h/w info\n");
BUG_ON(readl_relaxed(drvdata->sfrbase + REG_MMU_CTRL) != CTRL_BLOCK_DISABLE);
clk_disable(drvdata->clk);
+
+ SYSMMU_EVENT_LOG_DISABLE(SYSMMU_DRVDATA_TO_LOG(drvdata));
}
static bool __sysmmu_disable(struct sysmmu_drvdata *drvdata)
__sysmmu_init_config(drvdata);
- __sysmmu_set_ptbase(drvdata->sfrbase, drvdata->pgtable / PAGE_SIZE);
+ __sysmmu_set_ptbase(drvdata, drvdata->pgtable / PAGE_SIZE);
writel(CTRL_ENABLE, drvdata->sfrbase + REG_MMU_CTRL);
+
+ SYSMMU_EVENT_LOG_ENABLE(SYSMMU_DRVDATA_TO_LOG(drvdata));
}
static int __sysmmu_enable(struct sysmmu_drvdata *drvdata, phys_addr_t pgtable)
unsigned long flags;
struct sysmmu_drvdata *drvdata = dev_get_drvdata(sysmmu);
+ SYSMMU_EVENT_LOG_POWEROFF(SYSMMU_DRVDATA_TO_LOG(drvdata));
spin_lock_irqsave(&drvdata->lock, flags);
if (put_sysmmu_runtime_active(drvdata) && is_sysmmu_active(drvdata))
__sysmmu_disable_nocount(drvdata);
unsigned long flags;
struct sysmmu_drvdata *drvdata = dev_get_drvdata(sysmmu);
+ SYSMMU_EVENT_LOG_POWERON(SYSMMU_DRVDATA_TO_LOG(drvdata));
spin_lock_irqsave(&drvdata->lock, flags);
if (get_sysmmu_runtime_active(drvdata) && is_sysmmu_active(drvdata))
__sysmmu_enable_nocount(drvdata);
if (!domain->lv2entcnt)
goto err_counter;
+ if (exynos_iommu_init_event_log(IOMMU_PRIV_TO_LOG(domain), IOMMU_LOG_LEN))
+ goto err_init_event_log;
+
pgtable_flush(domain->pgtable, domain->pgtable + NUM_LV1ENTRIES);
spin_lock_init(&domain->lock);
return &domain->domain;
+err_init_event_log:
+ free_pages((unsigned long)domain->lv2entcnt, 2);
err_counter:
free_pages((unsigned long)domain->pgtable, 2);
err_pgtable:
dev_dbg(master, "%s: Attached IOMMU with pgtable %pa %s\n",
__func__, &pagetable, (ret == 0) ? "" : ", again");
+ SYSMMU_EVENT_LOG_IOMMU_ATTACH(IOMMU_PRIV_TO_LOG(domain), master);
return 0;
}
}
spin_unlock_irqrestore(&domain->lock, flags);
- if (found)
+ if (found) {
dev_dbg(master, "%s: Detached IOMMU with pgtable %pa\n",
__func__, &pagetable);
- else
+ SYSMMU_EVENT_LOG_IOMMU_DETACH(IOMMU_PRIV_TO_LOG(domain), master);
+ } else {
dev_err(master, "%s: No IOMMU is attached\n", __func__);
+ }
}
static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
atomic_set(pgcounter, NUM_LV2ENTRIES);
pgtable_flush(pent, pent + NUM_LV2ENTRIES);
pgtable_flush(sent, sent + 1);
+ SYSMMU_EVENT_LOG_IOMMU_ALLOCSLPD(IOMMU_PRIV_TO_LOG(domain),
+ iova & SECT_MASK);
} else {
/* Pre-allocated entry is not used, so free it. */
kmem_cache_free(lv2table_kmem_cache, pent);
page_entry(sent, 0));
atomic_set(lv2entcnt, 0);
*sent = 0;
+
+ SYSMMU_EVENT_LOG_IOMMU_FREESLPD(
+ IOMMU_PRIV_TO_LOG(domain),
+ iova_from_sent(domain->pgtable, sent));
}
spin_unlock_irqrestore(&domain->pgtablelock, flags);
}
return -ENOMEM;
}
+ exynos_sysmmu_debugfs_root = debugfs_create_dir("sysmmu", NULL);
+ if (!exynos_sysmmu_debugfs_root)
+ pr_err("%s: Failed to create debugfs entry\n", __func__);
+
ret = platform_driver_register(&exynos_sysmmu_driver);
if (ret) {
pr_err("%s: Failed to register driver\n", __func__);
#include <linux/exynos_iovmm.h>
+#include "exynos-iommu-log.h"
+
typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;
struct list_head clients_list; /* list of exynos_iommu_owner.client */
atomic_t *lv2entcnt; /* free lv2 entry counter for each section */
spinlock_t lock; /* lock for modifying clients_list */
+ struct exynos_iommu_event_log log;
};
/*
struct atomic_notifier_head fault_notifiers;
struct tlb_props tlb_props;
bool is_suspended;
+ struct exynos_iommu_event_log log;
};
struct exynos_vm_region {
unsigned int num_unmap;
const char *domain_name;
struct iommu_group *group;
+ struct exynos_iommu_event_log log;
};
void exynos_sysmmu_tlb_invalidate(struct iommu_domain *domain, dma_addr_t start,
return (sysmmu_pte_t *)(pgtable + lv1ent_offset(iova));
}
+static inline struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
+{
+ return container_of(dom, struct exynos_iommu_domain, domain);
+}
+
#if defined(CONFIG_EXYNOS_IOVMM)
static inline struct exynos_iovmm *exynos_get_iovmm(struct device *dev)
{
region->size >> PAGE_SHIFT);
spin_unlock(&vmm->bitmap_lock);
+ SYSMMU_EVENT_LOG_IOVMM_UNMAP(IOVMM_TO_LOG(vmm),
+ region->start, region->start + region->size);
+
kfree(region);
}
dev_dbg(dev, "IOVMM: Allocated VM region @ %#x/%#x bytes.\n",
(unsigned int)start, (unsigned int)size);
+ SYSMMU_EVENT_LOG_IOVMM_MAP(IOVMM_TO_LOG(vmm), start, start + size,
+ region->size - size);
+
return start;
err_map_map:
region = find_iovm_region(vmm, start);
BUG_ON(!region);
+ SYSMMU_EVENT_LOG_IOVMM_MAP(IOVMM_TO_LOG(vmm), start, start + size,
+ region->size - size);
return start;
err_map:
free_iovm_region(vmm, remove_iovm_region(vmm, start));
return 0;
}
-arch_initcall(exynos_iovmm_create_debugfs);
+core_initcall(exynos_iovmm_create_debugfs);
static int iovmm_debug_show(struct seq_file *s, void *unused)
{
goto err_setup_domain;
}
+ ret = exynos_iommu_init_event_log(IOVMM_TO_LOG(vmm), IOVMM_LOG_LEN);
+ if (ret)
+ goto err_init_event_log;
+
+ iovmm_add_log_to_debugfs(exynos_iovmm_debugfs_root,
+ IOVMM_TO_LOG(vmm), name);
+ iommu_add_log_to_debugfs(exynos_iommu_debugfs_root,
+ IOMMU_TO_LOG(vmm->domain), name);
+
spin_lock_init(&vmm->vmlist_lock);
spin_lock_init(&vmm->bitmap_lock);
name, vmm->iovm_size, vmm->iova_start);
return vmm;
+err_init_event_log:
+ iommu_domain_free(vmm->domain);
err_setup_domain:
kfree(vmm);
err_alloc_vmm: