[COMMON] iommu/exynos: add shareability setting in iommu_map
author Janghyuck Kim <janghyuck.kim@samsung.com>
Thu, 11 Aug 2016 04:23:33 +0000 (13:23 +0900)
committer Sangwook Ju <sw.ju@samsung.com>
Mon, 14 May 2018 10:45:19 +0000 (19:45 +0900)
The current exynos iommu driver applies the shareability setting only in the
userptr path; the prot argument passed to iommu_map() was ignored.
This patch honors the shareability request (IOMMU_CACHE in prot) in iommu_map() as well.
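
For illustration, a minimal caller-side sketch (not part of this change; the
domain, iova and paddr values below are placeholders) of requesting a
shareable mapping through the generic IOMMU API:

	/*
	 * Request a cacheable mapping; with this patch the exynos driver
	 * propagates IOMMU_CACHE in prot down into its page-table entries
	 * as the shareable attribute.
	 */
	int ret = iommu_map(domain, iova, paddr, SZ_4K,
			    IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
	if (ret)
		pr_err("failed to map shareable page: %d\n", ret);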

Change-Id: I5431a029746b5d9b89ceb8d4b0a6b2190e8d43e6
Signed-off-by: Janghyuck Kim <janghyuck.kim@samsung.com>
drivers/iommu/exynos-iommu.c

index 87ee91d4bb319831cd2ab44c09816fb0fb5b7513..6816d48c830fbde213bf555d2b16f1d0fe3fc1a4 100644
@@ -1244,8 +1244,10 @@ static void clear_lv2_page_table(sysmmu_pte_t *ent, int n)
 
 static int lv1set_section(struct exynos_iommu_domain *domain,
                          sysmmu_pte_t *sent, sysmmu_iova_t iova,
-                         phys_addr_t paddr, atomic_t *pgcnt)
+                         phys_addr_t paddr, int prot, atomic_t *pgcnt)
 {
+       bool shareable = !!(prot & IOMMU_CACHE);
+
        if (lv1ent_section(sent)) {
                WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
                        iova);
@@ -1262,19 +1264,25 @@ static int lv1set_section(struct exynos_iommu_domain *domain,
        }
 
        *sent = mk_lv1ent_sect(paddr);
+       if (shareable)
+               set_lv1ent_shareable(sent);
        pgtable_flush(sent, sent + 1);
 
        return 0;
 }
 
 static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
-                                                               atomic_t *pgcnt)
+                                               int prot, atomic_t *pgcnt)
 {
+       bool shareable = !!(prot & IOMMU_CACHE);
+
        if (size == SPAGE_SIZE) {
                if (WARN_ON(!lv2ent_fault(pent)))
                        return -EADDRINUSE;
 
                *pent = mk_lv2ent_spage(paddr);
+               if (shareable)
+                       set_lv2ent_shareable(pent);
                pgtable_flush(pent, pent + 1);
                atomic_dec(pgcnt);
        } else { /* size == LPAGE_SIZE */
@@ -1287,6 +1295,8 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
                        }
 
                        *pent = mk_lv2ent_lpage(paddr);
+                       if (shareable)
+                               set_lv2ent_shareable(pent);
                }
                pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
                atomic_sub(SPAGES_PER_LPAGE, pgcnt);
@@ -1308,7 +1318,7 @@ static int exynos_iommu_map(struct iommu_domain *iommu_domain,
        entry = section_entry(domain->pgtable, iova);
 
        if (size == SECT_SIZE) {
-               ret = lv1set_section(domain, entry, iova, paddr,
+               ret = lv1set_section(domain, entry, iova, paddr, prot,
                                     &domain->lv2entcnt[lv1ent_offset(iova)]);
        } else {
                sysmmu_pte_t *pent;
@@ -1319,7 +1329,7 @@ static int exynos_iommu_map(struct iommu_domain *iommu_domain,
                if (IS_ERR(pent))
                        ret = PTR_ERR(pent);
                else
-                       ret = lv2set_page(pent, paddr, size,
+                       ret = lv2set_page(pent, paddr, size, prot,
                                       &domain->lv2entcnt[lv1ent_offset(iova)]);
        }