writel(0x1, sfrbase + REG_MMU_FLUSH);
}
+static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *drvdata,
+ dma_addr_t iova, size_t size)
+{
+ void __iomem *sfrbase = drvdata->sfrbase;
+
+ /* Program the inclusive [start, end] range to flush, then trigger it */
+ __raw_writel(iova, sfrbase + REG_FLUSH_RANGE_START);
+ __raw_writel(iova + size - 1, sfrbase + REG_FLUSH_RANGE_END);
+ writel(0x1, sfrbase + REG_MMU_FLUSH_RANGE);
+}
+
static void __sysmmu_set_ptbase(void __iomem *sfrbase, phys_addr_t pfn_pgtable)
{
writel_relaxed(pfn_pgtable, sfrbase + REG_PT_BASE_PPN);
void exynos_sysmmu_tlb_invalidate(struct iommu_domain *iommu_domain,
dma_addr_t d_start, size_t size)
{
- return;
+ struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
+ struct exynos_iommu_owner *owner;
+ struct sysmmu_list_data *list;
+ sysmmu_iova_t start = (sysmmu_iova_t)d_start;
+ unsigned long flags;
+
+ spin_lock_irqsave(&domain->lock, flags);
+ /* Flush the range from every System MMU attached to this domain */
+ list_for_each_entry(owner, &domain->clients_list, client) {
+ list_for_each_entry(list, &owner->sysmmu_list, node) {
+ struct sysmmu_drvdata *drvdata = dev_get_drvdata(list->sysmmu);
+
+ spin_lock(&drvdata->lock);
+ if (!is_sysmmu_active(drvdata) ||
+ !is_sysmmu_runtime_active(drvdata)) {
+ spin_unlock(&drvdata->lock);
+ dev_dbg(drvdata->sysmmu,
+ "Skip TLB invalidation %#zx@%#x\n",
+ size, start);
+ continue;
+ }
+
+ dev_dbg(drvdata->sysmmu,
+ "TLB invalidation %#zx@%#x\n", size, start);
+
+ __sysmmu_tlb_invalidate(drvdata, start, size);
+
+ spin_unlock(&drvdata->lock);
+ }
+ }
+ spin_unlock_irqrestore(&domain->lock, flags);
}
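
/*
 * Illustrative sketch (not part of this patch): callers are expected to
 * unmap an IOVA range from the page table first and only then flush that
 * range from every System MMU attached to the domain.  A hypothetical
 * wrapper showing that ordering, assuming the generic iommu_unmap() API:
 *
 *	static size_t exynos_unmap_and_flush(struct iommu_domain *dom,
 *					     unsigned long iova, size_t size)
 *	{
 *		size_t unmapped = iommu_unmap(dom, iova, size);
 *
 *		if (unmapped)
 *			exynos_sysmmu_tlb_invalidate(dom, iova, unmapped);
 *
 *		return unmapped;
 *	}
 */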
int exynos_iommu_map_userptr(struct iommu_domain *dom, unsigned long addr,
#define mk_lv2ent_lpage(pa) ((sysmmu_pte_t) ((pa) >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_spage(pa) ((sysmmu_pte_t) ((pa) >> PG_ENT_SHIFT) | 2)
+#define SYSMMU_BLOCK_POLLING_COUNT 4096
+
#define REG_MMU_CTRL 0x000
#define REG_MMU_CFG 0x004
#define REG_MMU_STATUS 0x008
return data->activations > 0;
}
+static inline void __raw_sysmmu_enable(void __iomem *sfrbase)
+{
+ __raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
+}
+
+#define sysmmu_unblock __raw_sysmmu_enable
+
+void dump_sysmmu_tlb_pb(void __iomem *sfrbase);
+
+static inline bool sysmmu_block(void __iomem *sfrbase)
+{
+ int i = SYSMMU_BLOCK_POLLING_COUNT;
+
+ __raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
+ while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
+ --i;
+
+ if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
+ /*
+ * TODO: dump_sysmmu_tlb_pb(sfrbase);
+ */
+ panic("Failed to block System MMU!");
+ sysmmu_unblock(sfrbase);
+ return false;
+ }
+
+ return true;
+}
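
/*
 * Illustrative sketch (not part of this patch): sysmmu_block() and
 * sysmmu_unblock() are typically paired around register sequences that must
 * not race with in-flight translations.  A hypothetical caller, assuming
 * some operation do_blocked_maintenance() that requires the MMU stalled:
 *
 *	if (sysmmu_block(sfrbase)) {
 *		do_blocked_maintenance(sfrbase);
 *		sysmmu_unblock(sfrbase);
 *	}
 *
 * On success, the caller is responsible for re-enabling translation with
 * sysmmu_unblock().
 */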
+
static inline sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
return (sysmmu_pte_t *)(phys_to_virt(lv2table_base(sent))) +