[COMMON] iommu/exynos: add domain alloc/free
authorJanghyuck Kim <janghyuck.kim@samsung.com>
Sat, 23 Apr 2016 07:00:10 +0000 (16:00 +0900)
committerSangwook Ju <sw.ju@samsung.com>
Mon, 14 May 2018 10:45:18 +0000 (19:45 +0900)
An iommu domain represents a virtual address space for a client device.
domain_alloc creates a page table for each domain and initializes
domain-related data structures.
domain_free cleans up the attached-client list and releases
domain-related information.

Change-Id: I61d6621c5598be459e963ecdb58711fc76d083ed
Signed-off-by: Janghyuck Kim <janghyuck.kim@samsung.com>
drivers/iommu/exynos-iommu.c
drivers/iommu/exynos-iommu.h

index aa368c1a1d72961eed789ddbe7213cb0d448fd59..b970b114a132f39657b4d707a110a575b9839a6e 100644 (file)
@@ -32,6 +32,11 @@ static struct kmem_cache *lv2table_kmem_cache;
 static struct sysmmu_drvdata *sysmmu_drvdata_list;
 static struct exynos_iommu_owner *sysmmu_owner_list;
 
+/* Upcast from the generic iommu_domain embedded inside our private
+ * exynos_iommu_domain (the 'domain' member). */
+static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
+{
+       return container_of(dom, struct exynos_iommu_domain, domain);
+}
+
 struct sysmmu_list_data {
        struct device *sysmmu;
        struct list_head node;
@@ -62,6 +67,14 @@ int exynos_client_add(struct device_node *np, struct exynos_iovmm *vmm_data)
        return 0;
 }
 
+/* True when the master device has a System MMU bound via archdata.iommu. */
+#define has_sysmmu(dev)                ((dev)->archdata.iommu != NULL)
+
+/* For ARM64 only */
+static inline void pgtable_flush(void *vastart, void *vaend)
+{
+       /* Clean the CPU-written page-table range from the data cache so
+        * the SysMMU's table walker (which reads memory directly) sees
+        * the updated entries. */
+       __dma_flush_area(vastart, vaend - vastart);
+}
+
 void exynos_sysmmu_tlb_invalidate(struct iommu_domain *iommu_domain,
                                        dma_addr_t d_start, size_t size)
 {
@@ -181,6 +194,29 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
        return 0;
 }
 
+/* Stub: the real hardware disable sequence is added by a later patch.
+ * Always reports success so callers' control flow is exercised. */
+static bool __sysmmu_disable(struct sysmmu_drvdata *drvdata)
+{
+       /* DUMMY */
+       return true;
+}
+
+/* Disable every System MMU instance attached to @master.  The walk is
+ * done under owner->lock so the sysmmu_list cannot change mid-iteration.
+ * @master must have a SysMMU bound (BUG otherwise). */
+static void sysmmu_disable_from_master(struct device *master)
+{
+       unsigned long flags;
+       struct exynos_iommu_owner *owner = master->archdata.iommu;
+       struct sysmmu_list_data *list;
+       struct sysmmu_drvdata *drvdata;
+
+       BUG_ON(!has_sysmmu(master));
+
+       spin_lock_irqsave(&owner->lock, flags);
+       list_for_each_entry(list, &owner->sysmmu_list, node) {
+               drvdata = dev_get_drvdata(list->sysmmu);
+               __sysmmu_disable(drvdata);
+       }
+       spin_unlock_irqrestore(&owner->lock, flags);
+}
+
 #ifdef CONFIG_PM_SLEEP
 static int exynos_sysmmu_suspend(struct device *dev)
 {
@@ -213,11 +249,70 @@ static struct platform_driver exynos_sysmmu_driver __refdata = {
 
 static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
 {
+       struct exynos_iommu_domain *domain;
+
+       /* Only unmanaged (driver-controlled) domains are supported. */
+       if (type != IOMMU_DOMAIN_UNMANAGED)
+               return NULL;
+
+       domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+       if (!domain)
+               return NULL;
+
+       /* Order-2 (4-page, 16KiB) allocation holds the 4096-entry
+        * first-level page table (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t)). */
+       domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
+       if (!domain->pgtable)
+               goto err_pgtable;
+
+       /* Parallel per-lv1-entry counters of mapped lv2 entries; same
+        * order-2 size (4096 * sizeof(atomic_t)). */
+       domain->lv2entcnt = (atomic_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
+       if (!domain->lv2entcnt)
+               goto err_counter;
+
+       /* Push the zeroed table out of the CPU cache before hardware
+        * can walk it. */
+       pgtable_flush(domain->pgtable, domain->pgtable + NUM_LV1ENTRIES);
+
+       spin_lock_init(&domain->lock);
+       spin_lock_init(&domain->pgtablelock);
+       INIT_LIST_HEAD(&domain->clients_list);
+
+       /* TODO: get geometry from device tree */
+       domain->domain.geometry.aperture_start = 0;
+       domain->domain.geometry.aperture_end   = ~0UL;
+       domain->domain.geometry.force_aperture = true;
+
+       return &domain->domain;
+
+err_counter:
+       free_pages((unsigned long)domain->pgtable, 2);
+err_pgtable:
+       kfree(domain);
        return NULL;
 }
 
 static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
 {
+       struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
+       struct exynos_iommu_owner *owner;
+       unsigned long flags;
+       int i;
+
+       /* All masters should have been detached before the domain is
+        * freed; the loops below are defensive cleanup if not. */
+       WARN_ON(!list_empty(&domain->clients_list));
+
+       spin_lock_irqsave(&domain->lock, flags);
+
+       list_for_each_entry(owner, &domain->clients_list, client)
+               sysmmu_disable_from_master(owner->master);
+
+       /* Unlink (but do not free) any remaining owner nodes; owners are
+        * allocated elsewhere and keep their own lifetime. */
+       while (!list_empty(&domain->clients_list))
+               list_del_init(domain->clients_list.next);
+
+       spin_unlock_irqrestore(&domain->lock, flags);
+
+       /* Free every second-level table still referenced by a first-level
+        * page-table entry. */
+       for (i = 0; i < NUM_LV1ENTRIES; i++)
+               if (lv1ent_page(domain->pgtable + i))
+                       kmem_cache_free(lv2table_kmem_cache,
+                               phys_to_virt(lv2table_base(domain->pgtable + i)));
+
+       /* Mirror the order-2 allocations made in domain_alloc. */
+       free_pages((unsigned long)domain->pgtable, 2);
+       free_pages((unsigned long)domain->lv2entcnt, 2);
+       kfree(domain);
 }
 
 static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
index 81b305338db532f0a0e859af9a43c69a1ac530b2..00cfadf70c255bb210b1b6a9e04a94176a299817 100644 (file)
@@ -39,9 +39,16 @@ typedef u32 sysmmu_pte_t;
 #define LPAGE_MASK (~(LPAGE_SIZE - 1))
 #define SPAGE_MASK (~(SPAGE_SIZE - 1))
 
+/* 4096 first-level entries; the lv1 table is therefore
+ * 4096 * sizeof(sysmmu_pte_t) = 16KiB, matching the order-2 page
+ * allocation in domain_alloc. */
+#define NUM_LV1ENTRIES 4096
 #define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)
 #define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))
 
+/* Low 3 bits of an lv1 entry encode its type: 0 = fault (unmapped),
+ * 1 = pointer to a second-level page table. */
+#define PG_ENT_SHIFT   4
+#define lv1ent_fault(sent)     ((*(sent) & 7) == 0)
+#define lv1ent_page(sent)      ((*(sent) & 7) == 1)
+
+/* Physical base of the lv2 table: clear the low 6 flag bits, then shift
+ * left by PG_ENT_SHIFT (entry stores address bits above bit 4, allowing
+ * physical addresses wider than 32 bits). */
+#define lv2table_base(sent)    ((phys_addr_t)(*(sent) & ~0x3F) << PG_ENT_SHIFT)
+
+
 #define REG_MMU_CTRL           0x000
 #define REG_MMU_CFG            0x004
 #define REG_MMU_STATUS         0x008