[COMMON] iommu/exynos: add cache sync operation
authorJanghyuck Kim <janghyuck.kim@samsung.com>
Thu, 28 Apr 2016 06:49:40 +0000 (15:49 +0900)
committerSangwook Ju <sw.ju@samsung.com>
Mon, 14 May 2018 10:45:18 +0000 (19:45 +0900)
Cache sync operations for the device and the CPU are added.

Change-Id: I6167a51997d50978e1bc22bbe2c1c33a18cf0666
Signed-off-by: Janghyuck Kim <janghyuck.kim@samsung.com>
drivers/iommu/exynos-iommu.c
drivers/iommu/exynos-iommu.h

index a751cf605a2febd63c393aee93fa28f249e2ee14..009e70896d1b9c97467e781143ea6c79bf6d538d 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
+#include <linux/highmem.h>
 #include <linux/io.h>
 #include <linux/iommu.h>
 #include <linux/interrupt.h>
@@ -1052,3 +1053,96 @@ err_reg_driver:
 core_initcall(exynos_iommu_init);
 
 IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu", NULL);
+
+typedef void (*syncop)(const void *, size_t, int);
+
+/*
+ * sysmmu_dma_sync_page - apply a CPU cache maintenance operation to the
+ * physical pages backing one page-table mapping entry.
+ *
+ * @phys:   physical base address of the section/lpage/spage entry
+ * @off:    byte offset of the sync start within that entry (< @pgsize)
+ * @pgsize: size of the mapping entry (SECT_SIZE/LPAGE_SIZE/SPAGE_SIZE)
+ * @size:   remaining length of the requested sync range
+ * @op:     cache operation (__dma_map_area or __dma_unmap_area)
+ * @dir:    DMA direction, passed through to @op
+ *
+ * Returns the number of bytes synced: @size clamped to the end of this
+ * mapping entry, so the caller can advance and continue with the next one.
+ */
+static size_t sysmmu_dma_sync_page(phys_addr_t phys, off_t off,
+                                  size_t pgsize, size_t size,
+                                  syncop op, enum dma_data_direction dir)
+{
+       /*
+        * Clamp the length to the end of this mapping entry using the
+        * offset within the *whole* entry, before it is reduced to an
+        * in-page offset below.  Clamping with the masked offset would
+        * overrun the entry's physical pages and report too large a
+        * 'done' value, making the caller skip part of the next entry.
+        */
+       size_t len = min(pgsize - off, size);
+       size_t skip_pages = off >> PAGE_SHIFT;
+       struct page *page;
+
+       off = off & ~PAGE_MASK; /* offset inside the first page */
+       page = phys_to_page(phys) + skip_pages;
+       size = len;
+
+       /* kmap() each page: highmem pages have no permanent mapping. */
+       while (len > 0) {
+               size_t sz;
+
+               sz = min(PAGE_SIZE, len + off) - off;
+               op(kmap(page) + off, sz, dir);
+               kunmap(page++);
+               len -= sz;
+               off = 0;
+       }
+
+       return size;
+}
+
+/*
+ * exynos_iommu_sync - walk @pgtable over [@iova, @iova + @len) and apply
+ * the cache maintenance operation @op to every mapped page in the range.
+ *
+ * Handles all three mapping granularities (section, large page, small
+ * page).  The walk stops early if an unmapped (fault) entry is reached;
+ * the remainder of the range is then silently left unsynced.
+ */
+static void exynos_iommu_sync(sysmmu_pte_t *pgtable, dma_addr_t iova,
+                       size_t len, syncop op, enum dma_data_direction dir)
+{
+       while (len > 0) {
+               sysmmu_pte_t *entry;
+               size_t done;
+
+               /* First-level entry: a 1MiB section or a pointer to a
+                * second-level table. */
+               entry = section_entry(pgtable, iova);
+               switch (*entry & FLPD_FLAG_MASK) {
+               case SECT_FLAG:
+                       done = sysmmu_dma_sync_page(section_phys(entry),
+                                       section_offs(iova), SECT_SIZE,
+                                       len, op, dir);
+                       break;
+               case SLPD_FLAG:
+                       /* Second-level entry: large page or small page. */
+                       entry = page_entry(entry, iova);
+                       switch (*entry & SLPD_FLAG_MASK) {
+                       case LPAGE_FLAG:
+                               done = sysmmu_dma_sync_page(lpage_phys(entry),
+                                               lpage_offs(iova), LPAGE_SIZE,
+                                               len, op, dir);
+                               break;
+                       case SPAGE_FLAG:
+                               done = sysmmu_dma_sync_page(spage_phys(entry),
+                                               spage_offs(iova), SPAGE_SIZE,
+                                               len, op, dir);
+                               break;
+                       default: /* fault */
+                               return;
+                       }
+                       break;
+               default: /* fault */
+                       return;
+               }
+
+               /* done > 0 is guaranteed for a mapped entry, so the loop
+                * always makes forward progress. */
+               iova += done;
+               len -= done;
+       }
+}
+
+/* Return the first-level page table of the domain @dev is attached to. */
+static sysmmu_pte_t *sysmmu_get_pgtable(struct device *dev)
+{
+       struct exynos_iommu_owner *owner = dev->archdata.iommu;
+
+       return to_exynos_domain(owner->domain)->pgtable;
+}
+
+/*
+ * Clean/flush CPU caches for [@iova, @iova + @len) so that the device
+ * observes data written by the CPU.
+ */
+void exynos_iommu_sync_for_device(struct device *dev, dma_addr_t iova,
+                                 size_t len, enum dma_data_direction dir)
+{
+       sysmmu_pte_t *pgtable = sysmmu_get_pgtable(dev);
+
+       exynos_iommu_sync(pgtable, iova, len, __dma_map_area, dir);
+}
+
+/*
+ * Invalidate CPU caches for [@iova, @iova + @len) so that the CPU
+ * observes data written by the device.
+ */
+void exynos_iommu_sync_for_cpu(struct device *dev, dma_addr_t iova, size_t len,
+                               enum dma_data_direction dir)
+{
+       /* The device only read the buffer; nothing to invalidate. */
+       if (dir == DMA_TO_DEVICE)
+               return;
+
+       exynos_iommu_sync(sysmmu_get_pgtable(dev), iova, len,
+                         __dma_unmap_area, dir);
+}
index 37bd63c2f00e811c2a756f3b706f69f9e1ce10b1..0c44ff87b01032585a7e83229e9b9d3c7cf49856 100644 (file)
@@ -40,6 +40,10 @@ typedef u32 sysmmu_pte_t;
 #define LPAGE_MASK (~(LPAGE_SIZE - 1))
 #define SPAGE_MASK (~(SPAGE_SIZE - 1))
 
+/* Masks selecting the entry bits that hold the physical page base for
+ * each mapping size (entries store phys >> PG_ENT_SHIFT).  Expansions
+ * are fully parenthesized so the ~ binds correctly in any context. */
+#define SECT_ENT_MASK  (~((SECT_SIZE >> PG_ENT_SHIFT) - 1))
+#define LPAGE_ENT_MASK (~((LPAGE_SIZE >> PG_ENT_SHIFT) - 1))
+#define SPAGE_ENT_MASK (~((SPAGE_SIZE >> PG_ENT_SHIFT) - 1))
+
 #define SPAGES_PER_LPAGE       (LPAGE_SIZE / SPAGE_SIZE)
 
 #define PGBASE_TO_PHYS(pgent)  ((phys_addr_t)(pgent) << PG_ENT_SHIFT)
@@ -64,6 +68,14 @@ typedef u32 sysmmu_pte_t;
 #define LPAGE_FLAG     1
 #define SPAGE_FLAG     2
 
+/* Extract the physical base address / in-mapping offset of an entry.
+ * ENT_TO_PHYS is fully parenthesized so the cast cannot mis-bind when
+ * the macro is used inside a larger expression. */
+#define ENT_TO_PHYS(ent) ((phys_addr_t)(*(ent)))
+#define section_phys(sent) PGBASE_TO_PHYS(ENT_TO_PHYS(sent) & SECT_ENT_MASK)
+#define section_offs(iova) ((iova) & (SECT_SIZE - 1))
+#define lpage_phys(pent) PGBASE_TO_PHYS(ENT_TO_PHYS(pent) & LPAGE_ENT_MASK)
+#define lpage_offs(iova) ((iova) & (LPAGE_SIZE - 1))
+#define spage_phys(pent) PGBASE_TO_PHYS(ENT_TO_PHYS(pent) & SPAGE_ENT_MASK)
+#define spage_offs(iova) ((iova) & (SPAGE_SIZE - 1))
+
 #define lv1ent_section(sent) ((*(sent) & FLPD_FLAG_MASK) == SECT_FLAG)
 #define lv2table_base(sent)    ((phys_addr_t)(*(sent) & ~0x3F) << PG_ENT_SHIFT)
 #define lv2ent_fault(pent) ((*(pent) & SLPD_FLAG_MASK) == 0)