#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
+#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
core_initcall(exynos_iommu_init);
IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu", NULL);
+
+/* Cache maintenance callback; __dma_map_area()/__dma_unmap_area() fit this. */
+typedef void (*syncop)(const void *, size_t, int);
+
+/*
+ * sysmmu_dma_sync_page - apply a cache maintenance op to the memory
+ * backing (part of) one page-table mapping.
+ *
+ * @phys:	physical base address of the mapping (section/lpage/spage)
+ * @off:	byte offset into the mapping where the sync starts
+ * @pgsize:	total size of the mapping (SECT_SIZE/LPAGE_SIZE/SPAGE_SIZE)
+ * @size:	bytes still requested by the caller
+ * @op:		cache maintenance routine (__dma_map_area/__dma_unmap_area)
+ * @dir:	DMA direction, forwarded unchanged to @op
+ *
+ * Returns the number of bytes actually synced, i.e. min(@pgsize - @off,
+ * @size), so the caller can advance its iova/len cursor by that amount.
+ */
+static size_t sysmmu_dma_sync_page(phys_addr_t phys, off_t off,
+				   size_t pgsize, size_t size,
+				   syncop op, enum dma_data_direction dir)
+{
+	size_t len;
+	size_t skip_pages = off >> PAGE_SHIFT;
+	struct page *page;
+
+	/* Step past whole pages covered by @off; keep the intra-page offset. */
+	off = off & ~PAGE_MASK;
+	page = phys_to_page(phys) + skip_pages;
+	/* Clamp to whatever of this mapping remains from @off. */
+	len = min(pgsize - off, size);
+	size = len;
+
+	/* Walk page by page; kmap() because the pages may be in highmem. */
+	while (len > 0) {
+		size_t sz;
+
+		sz = min(PAGE_SIZE, len + off) - off;
+		op(kmap(page) + off, sz, dir);
+		kunmap(page++);
+		len -= sz;
+		off = 0;	/* only the first page starts mid-page */
+	}
+
+	return size;
+}
+
+/*
+ * exynos_iommu_sync - walk the SysMMU page table over [iova, iova + len)
+ * and apply the cache maintenance op @op to the physical memory behind
+ * every mapped entry in that range.
+ *
+ * Dispatches on the first-level entry type: sections are synced directly;
+ * for second-level tables the walk descends to large/small page entries.
+ * The walk terminates early if it reaches an unmapped (fault) entry.
+ */
+static void exynos_iommu_sync(sysmmu_pte_t *pgtable, dma_addr_t iova,
+			      size_t len, syncop op, enum dma_data_direction dir)
+{
+	while (len > 0) {
+		sysmmu_pte_t *entry;
+		size_t done;	/* bytes covered by the current entry */
+
+		entry = section_entry(pgtable, iova);
+		switch (*entry & FLPD_FLAG_MASK) {
+		case SECT_FLAG:
+			done = sysmmu_dma_sync_page(section_phys(entry),
+					section_offs(iova), SECT_SIZE,
+					len, op, dir);
+			break;
+		case SLPD_FLAG:
+			/* First-level entry points to a second-level table. */
+			entry = page_entry(entry, iova);
+			switch (*entry & SLPD_FLAG_MASK) {
+			case LPAGE_FLAG:
+				done = sysmmu_dma_sync_page(lpage_phys(entry),
+						lpage_offs(iova), LPAGE_SIZE,
+						len, op, dir);
+				break;
+			case SPAGE_FLAG:
+				done = sysmmu_dma_sync_page(spage_phys(entry),
+						spage_offs(iova), SPAGE_SIZE,
+						len, op, dir);
+				break;
+			default: /* fault */
+				return;
+			}
+			break;
+		default: /* fault */
+			return;
+		}
+
+		iova += done;
+		len -= done;
+	}
+}
+
+/* Return the first-level page table of the domain @dev is attached to. */
+static sysmmu_pte_t *sysmmu_get_pgtable(struct device *dev)
+{
+	struct exynos_iommu_owner *owner = dev->archdata.iommu;
+
+	return to_exynos_domain(owner->domain)->pgtable;
+}
+
+/*
+ * exynos_iommu_sync_for_device - make CPU-written data in the range
+ * [iova, iova + len) of @dev's IOMMU address space visible to the
+ * device, via __dma_map_area on the backing pages.
+ */
+void exynos_iommu_sync_for_device(struct device *dev, dma_addr_t iova,
+				  size_t len, enum dma_data_direction dir)
+{
+	exynos_iommu_sync(sysmmu_get_pgtable(dev),
+			  iova, len, __dma_map_area, dir);
+}
+
+/*
+ * exynos_iommu_sync_for_cpu - make device-written data in the range
+ * [iova, iova + len) of @dev's IOMMU address space visible to the CPU,
+ * via __dma_unmap_area on the backing pages.
+ */
+void exynos_iommu_sync_for_cpu(struct device *dev, dma_addr_t iova, size_t len,
+			       enum dma_data_direction dir)
+{
+	/*
+	 * A DMA_TO_DEVICE transfer does not modify memory, so there is
+	 * nothing for the CPU side to invalidate afterwards.
+	 */
+	if (dir == DMA_TO_DEVICE)
+		return;
+
+	exynos_iommu_sync(sysmmu_get_pgtable(dev),
+			  iova, len, __dma_unmap_area, dir);
+}
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))
+/*
+ * A page-table entry stores the physical base shifted right by
+ * PG_ENT_SHIFT (see PGBASE_TO_PHYS below); these masks strip the
+ * attribute/flag bits below the respective mapping granularity.
+ * Expansions are fully parenthesized for safe use in any expression.
+ */
+#define SECT_ENT_MASK (~((SECT_SIZE >> PG_ENT_SHIFT) - 1))
+#define LPAGE_ENT_MASK (~((LPAGE_SIZE >> PG_ENT_SHIFT) - 1))
+#define SPAGE_ENT_MASK (~((SPAGE_SIZE >> PG_ENT_SHIFT) - 1))
+
#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define PGBASE_TO_PHYS(pgent) ((phys_addr_t)(pgent) << PG_ENT_SHIFT)
#define LPAGE_FLAG 1
#define SPAGE_FLAG 2
+/* Raw entry value widened to phys_addr_t; parenthesized so the cast
+ * binds correctly when combined with binary operators such as '&'. */
+#define ENT_TO_PHYS(ent) ((phys_addr_t)(*(ent)))
+/* Physical base / intra-mapping offset helpers for each mapping size. */
+#define section_phys(sent) PGBASE_TO_PHYS(ENT_TO_PHYS(sent) & SECT_ENT_MASK)
+#define section_offs(iova) ((iova) & (SECT_SIZE - 1))
+#define lpage_phys(pent) PGBASE_TO_PHYS(ENT_TO_PHYS(pent) & LPAGE_ENT_MASK)
+#define lpage_offs(iova) ((iova) & (LPAGE_SIZE - 1))
+#define spage_phys(pent) PGBASE_TO_PHYS(ENT_TO_PHYS(pent) & SPAGE_ENT_MASK)
+#define spage_offs(iova) ((iova) & (SPAGE_SIZE - 1))
+
#define lv1ent_section(sent) ((*(sent) & FLPD_FLAG_MASK) == SECT_FLAG)
#define lv2table_base(sent) ((phys_addr_t)(*(sent) & ~0x3F) << PG_ENT_SHIFT)
#define lv2ent_fault(pent) ((*(pent) & SLPD_FLAG_MASK) == 0)