[COMMON] iommu/exynos: add userptr map/unmap
author Janghyuck Kim <janghyuck.kim@samsung.com>
Tue, 3 May 2016 11:52:34 +0000 (20:52 +0900)
committer Cosmin Tanislav <demonsingur@gmail.com>
Mon, 22 Apr 2024 17:21:51 +0000 (20:21 +0300)
Support mapping and unmapping of user virtual address ranges through the exynos IOMMU. The map path walks the caller's page table, faulting pages in where needed and pinning them with get_page() (PFN mappings are only flagged, never pinned); the unmap path drops the references and invalidates the TLB.
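
A rough usage sketch (dom, user_buf, iova and size below are placeholders;
only the two signatures come from this patch, and addr, iova and size must
all be page aligned, since the map path BUG()s otherwise):

    ret = exynos_iommu_map_userptr(dom, (unsigned long)user_buf,
                                   iova, size, IOMMU_READ | IOMMU_WRITE);
    if (ret)
        return ret;

    /* ... device works on [iova, iova + size) ... */

    exynos_iommu_unmap_userptr(dom, iova, size);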

Change-Id: Iff0398588a1d295d86ea9285303a558453f0baba
Signed-off-by: Janghyuck Kim <janghyuck.kim@samsung.com>
drivers/iommu/exynos-iommu.c
drivers/iommu/exynos-iommu.h

index b081c6c78b5a16c70c360b8addda49ad27edf7da..580196df0e84fba22e5e9d1759c0cbf023ee7f1e 100644 (file)
@@ -22,6 +22,8 @@
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
 
 #include <asm/cacheflush.h>
 #include <asm/pgtable.h>
@@ -163,18 +165,6 @@ void exynos_sysmmu_tlb_invalidate(struct iommu_domain *iommu_domain,
        spin_unlock_irqrestore(&domain->lock, flags);
 }
 
-int exynos_iommu_map_userptr(struct iommu_domain *dom, unsigned long addr,
-                             dma_addr_t d_iova, size_t size, int prot)
-{
-       return 0;
-}
-
-void exynos_iommu_unmap_userptr(struct iommu_domain *dom,
-                               dma_addr_t d_iova, size_t size)
-{
-       return;
-}
-
 static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
        "PTW ACCESS FAULT",
        "PAGE FAULT",
@@ -1193,6 +1183,245 @@ core_initcall(exynos_iommu_init);
 
 IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu", NULL);
 
+static int mm_fault_translate(int fault)
+{
+       if (fault & VM_FAULT_OOM)
+               return -ENOMEM;
+       else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
+               return -EBUSY;
+       else if (fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
+               return -EFAULT;
+       else if (fault & VM_FAULT_FALLBACK)
+               return -EAGAIN;
+
+       return -EFAULT;
+}
+
+static sysmmu_pte_t *alloc_lv2entry_userptr(struct exynos_iommu_domain *domain,
+                                               sysmmu_iova_t iova)
+{
+       return alloc_lv2entry(domain, section_entry(domain->pgtable, iova),
+                               iova, &domain->lv2entcnt[lv1ent_offset(iova)]);
+}
+
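+/*
+ * Map the user PTEs covering [addr, end) into the IOMMU at iova: fault
+ * pages in where needed and pin them with get_page(), except for PFN
+ * mappings, which are only flagged in the IOMMU page table.
+ */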
+static int sysmmu_map_pte(struct mm_struct *mm,
+               pmd_t *pmd, unsigned long addr, unsigned long end,
+               struct exynos_iommu_domain *domain, sysmmu_iova_t iova, int prot)
+{
+       pte_t *pte;
+       int ret = 0;
+       spinlock_t *ptl;
+       bool write = !!(prot & IOMMU_WRITE);
+       bool pfnmap = !!(prot & IOMMU_PFNMAP);
+       bool shareable = !!(prot & IOMMU_CACHE);
+       unsigned int fault_flag = write ? FAULT_FLAG_WRITE : 0;
+       sysmmu_pte_t *ent, *ent_beg;
+
+       pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
+       if (!pte)
+               return -ENOMEM;
+
+       ent = alloc_lv2entry_userptr(domain, iova);
+       if (IS_ERR(ent)) {
+               ret = PTR_ERR(ent);
+               goto err;
+       }
+
+       ent_beg = ent;
+
+       do {
+               if (pte_none(*pte) || !pte_present(*pte) ||
+                                       (write && !pte_write(*pte))) {
+                       int cnt = 0;
+                       int maxcnt = 1;
+
+                       if (pfnmap) {
+                               ret = -EFAULT;
+                               goto err;
+                       }
+
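+                       /* fault the page in (and retry if it is reclaimed) */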
+                       while (cnt++ < maxcnt) {
+                               spin_unlock(ptl);
+                               /* find_vma() always succeeds here */
+                               ret = handle_mm_fault(find_vma(mm, addr),
+                                               addr, fault_flag);
+                               spin_lock(ptl);
+                               if (ret & VM_FAULT_ERROR) {
+                                       ret = mm_fault_translate(ret);
+                                       goto err;
+                               } else {
+                                       ret = 0;
+                               }
+                               /*
+                                * A race between handle_mm_fault() and page
+                                * reclamation may let handle_mm_fault()
+                                * return 0 even though it failed to page in
+                                * the entry. If the PTE is still a swap
+                                * entry, retry to give handle_mm_fault()
+                                * another chance to page it in.
+                                */
+                               if (is_swap_pte(*pte)) {
+                                       BUG_ON(maxcnt > 8);
+                                       maxcnt++;
+                               }
+                       }
+               }
+
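+               /* install a small-page entry; pin the page unless PFN-mapped */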
+               BUG_ON(!lv2ent_fault(ent));
+
+               *ent = mk_lv2ent_spage(pte_pfn(*pte) << PAGE_SHIFT);
+
+               if (!pfnmap)
+                       get_page(pte_page(*pte));
+               else
+                       mk_lv2ent_pfnmap(ent);
+
+               if (shareable)
+                       set_lv2ent_shareable(ent);
+
+               ent++;
+               iova += PAGE_SIZE;
+
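+               /* crossed a section boundary: flush and take the next lv2 table */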
+               if ((iova & SECT_MASK) != ((iova - 1) & SECT_MASK)) {
+                       pgtable_flush(ent_beg, ent);
+
+                       ent = alloc_lv2entry_userptr(domain, iova);
+                       if (IS_ERR(ent)) {
+                               ret = PTR_ERR(ent);
+                               goto err;
+                       }
+                       ent_beg = ent;
+               }
+       } while (pte++, addr += PAGE_SIZE, addr != end);
+
+       pgtable_flush(ent_beg, ent);
+err:
+       pte_unmap_unlock(pte - 1, ptl);
+       return ret;
+}
+
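+/* allocate the PMD level and hand each PMD-sized chunk to sysmmu_map_pte() */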
+static inline int sysmmu_map_pmd(struct mm_struct *mm,
+               pud_t *pud, unsigned long addr, unsigned long end,
+               struct exynos_iommu_domain *domain, sysmmu_iova_t iova, int prot)
+{
+       pmd_t *pmd;
+       unsigned long next;
+       int ret;
+
+       pmd = pmd_alloc(mm, pud, addr);
+       if (!pmd)
+               return -ENOMEM;
+
+       do {
+               next = pmd_addr_end(addr, end);
+               ret = sysmmu_map_pte(mm, pmd, addr, next, domain, iova, prot);
+               if (ret)
+                       return ret;
+               iova += (next - addr);
+       } while (pmd++, addr = next, addr != end);
+       return 0;
+}
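+
+/* same pattern one level up: allocate PUDs, map each chunk via sysmmu_map_pmd() */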
+static inline int sysmmu_map_pud(struct mm_struct *mm,
+               pgd_t *pgd, unsigned long addr, unsigned long end,
+               struct exynos_iommu_domain *domain, sysmmu_iova_t iova, int prot)
+{
+       pud_t *pud;
+       unsigned long next;
+       int ret;
+
+       pud = pud_alloc(mm, pgd, addr);
+       if (!pud)
+               return -ENOMEM;
+       do {
+               next = pud_addr_end(addr, end);
+               ret = sysmmu_map_pmd(mm, pud, addr, next, domain, iova, prot);
+               if (ret)
+                       return ret;
+               iova += (next - addr);
+       } while (pud++, addr = next, addr != end);
+       return 0;
+}
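+
+/*
+ * Map size bytes at addr in the current process into the IOMMU at d_iova
+ * by walking the CPU page table top-down. A partially built mapping is
+ * unrolled on failure.
+ */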
+int exynos_iommu_map_userptr(struct iommu_domain *dom, unsigned long addr,
+                             dma_addr_t d_iova, size_t size, int prot)
+{
+       struct exynos_iommu_domain *domain = to_exynos_domain(dom);
+       struct mm_struct *mm = current->mm;
+       unsigned long end = addr + size;
+       dma_addr_t start = d_iova;
+       sysmmu_iova_t iova = (sysmmu_iova_t)d_iova;
+       unsigned long next;
+       pgd_t *pgd;
+       int ret;
+
+       BUG_ON((iova | addr | size) & ~PAGE_MASK);
+
+       pgd = pgd_offset(mm, addr);
+
+       do {
+               next = pgd_addr_end(addr, end);
+               ret = sysmmu_map_pud(mm, pgd, addr, next, domain, iova, prot);
+               if (ret)
+                       goto err;
+               iova += (next - addr);
+       } while (pgd++, addr = next, addr != end);
+
+       return 0;
+err:
+       /* unroll */
+       exynos_iommu_unmap_userptr(dom, start, size);
+       return ret;
+}
+
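+/* number of lv2 entries from iova to the end of its section */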
+#define sect_offset(iova)      ((iova) & ~SECT_MASK)
+#define lv2ents_within(iova)   ((SECT_SIZE - sect_offset(iova)) >> SPAGE_ORDER)
+
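+/*
+ * Tear down a userptr mapping: clear each small-page entry, drop the page
+ * reference taken at map time (PFN mappings were never pinned), then
+ * invalidate the TLB for the whole range.
+ */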
+void exynos_iommu_unmap_userptr(struct iommu_domain *dom,
+                               dma_addr_t d_iova, size_t size)
+{
+       struct exynos_iommu_domain *domain = to_exynos_domain(dom);
+       sysmmu_iova_t iova = (sysmmu_iova_t)d_iova;
+       sysmmu_pte_t *sent = section_entry(domain->pgtable, iova);
+       unsigned int entries = size >> SPAGE_ORDER;
+       dma_addr_t start = d_iova;
+
+       while (entries > 0) {
+               unsigned int lv2ents, i;
+               sysmmu_pte_t *pent;
+
+               /* ignore fault entries */
+               if (lv1ent_fault(sent)) {
+                       lv2ents = min_t(unsigned int,
+                                       lv2ents_within(iova), entries);
+                       entries -= lv2ents;
+                       iova += lv2ents << SPAGE_ORDER;
+                       sent++;
+                       continue;
+               }
+
+               BUG_ON(!lv1ent_page(sent));
+
+               lv2ents = min_t(unsigned int, lv2ents_within(iova), entries);
+
+               pent = page_entry(sent, iova);
+               for (i = 0; i < lv2ents; i++, pent++) {
+                       /* ignore fault entries */
+                       if (lv2ent_fault(pent))
+                               continue;
+
+                       BUG_ON(!lv2ent_small(pent));
+
+                       if (!lv2ent_pfnmap(pent))
+                               put_page(phys_to_page(spage_phys(pent)));
+
+                       *pent = 0;
+               }
+
+               pgtable_flush(pent - lv2ents, pent);
+
+               entries -= lv2ents;
+               iova += lv2ents << SPAGE_ORDER;
+               sent++;
+       }
+
+       exynos_sysmmu_tlb_invalidate(dom, start, size);
+}
+
 typedef void (*syncop)(const void *, size_t, int);
 
 static size_t sysmmu_dma_sync_page(phys_addr_t phys, off_t off,
index 62f0899dbe9f157ab86fa0516cf7beaeffef0ff1..03b8afba201fcf6eadf907adbdc7a3ce1c48cb63 100644 (file)
@@ -21,6 +21,8 @@
 #include <linux/irq.h>
 #include <linux/clk.h>
 
+#include <linux/exynos_iovmm.h>
+
 typedef u32 sysmmu_iova_t;
 typedef u32 sysmmu_pte_t;
 
@@ -86,6 +88,10 @@ typedef u32 sysmmu_pte_t;
 #define mk_lv1ent_page(pa) ((sysmmu_pte_t) ((pa) >> PG_ENT_SHIFT) | 1)
 #define mk_lv2ent_lpage(pa) ((sysmmu_pte_t) ((pa) >> PG_ENT_SHIFT) | 1)
 #define mk_lv2ent_spage(pa) ((sysmmu_pte_t) ((pa) >> PG_ENT_SHIFT) | 2)
+#define set_lv1ent_shareable(sent) (*(sent) |= (1 << 6))
+#define set_lv2ent_shareable(sent) (*(sent) |= (1 << 4))
+#define mk_lv2ent_pfnmap(pent) (*(pent) |= (1 << 5)) /* SW flag in a field unused by HW */
+#define lv2ent_pfnmap(pent) ((*(pent) & (1 << 5)) == (1 << 5))
 
 #define SYSMMU_BLOCK_POLLING_COUNT 4096