__dma_flush_area(vastart, vaend - vastart);
}
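+/*
+ * Capability probes: the two helpers below read the SysMMU capability
+ * SFRs to check which optional features this instance implements. A
+ * typical use, as in __sysmmu_init_config() below:
+ *
+ *	if (has_sysmmu_set_associative_tlb(sfrbase))
+ *		__sysmmu_set_tlb_line_size(sfrbase);
+ */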
+static bool has_sysmmu_capable_pbuf(void __iomem *sfrbase)
+{
+ u32 cfg = readl_relaxed(sfrbase + REG_MMU_CAPA);
+
+ return MMU_HAVE_PB(cfg);
+}
+
+static bool has_sysmmu_set_associative_tlb(void __iomem *sfrbase)
+{
+ u32 cfg = readl_relaxed(sfrbase + REG_MMU_CAPA_1);
+
+ return MMU_IS_TLB_CONFIGURABLE(cfg);
+}
+
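+/*
+ * Read-modify-write of REG_L2TLB_CFG: clear the line-size field and set
+ * it to the driver default, leaving the other configuration bits intact.
+ */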
+static void __sysmmu_set_tlb_line_size(void __iomem *sfrbase)
+{
+ u32 cfg = readl_relaxed(sfrbase + REG_L2TLB_CFG);
+
+ cfg &= ~MMU_MASK_LINE_SIZE;
+ cfg |= MMU_DEFAULT_LINE_SIZE;
+ writel_relaxed(cfg, sfrbase + REG_L2TLB_CFG);
+}
+
+static void __exynos_sysmmu_set_prefbuf_axi_id(struct sysmmu_drvdata *drvdata)
+{
+ if (!has_sysmmu_capable_pbuf(drvdata->sfrbase))
+ return;
+
+ /* TODO: implement later */
+}
+
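+/*
+ * Writing 1 to REG_MMU_FLUSH invalidates the entire TLB. A non-relaxed
+ * writel() is used here, presumably so that preceding page-table updates
+ * in memory are visible to the SysMMU before the flush completes.
+ */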
+static void __sysmmu_tlb_invalidate_all(void __iomem *sfrbase)
+{
+ writel(0x1, sfrbase + REG_MMU_FLUSH);
+}
+
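+/*
+ * REG_PT_BASE_PPN takes the page-table base as a page frame number rather
+ * than a physical address, hence callers pass pgtable / PAGE_SIZE. The
+ * full TLB flush afterwards drops any translations cached from the old
+ * page table.
+ */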
+static void __sysmmu_set_ptbase(void __iomem *sfrbase, phys_addr_t pfn_pgtable)
+{
+ writel_relaxed(pfn_pgtable, sfrbase + REG_PT_BASE_PPN);
+
+ __sysmmu_tlb_invalidate_all(sfrbase);
+}
+
void exynos_sysmmu_tlb_invalidate(struct iommu_domain *iommu_domain,
dma_addr_t d_start, size_t size)
{
	/* DUMMY */
}
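+/*
+ * Low-level disable: clear REG_MMU_CFG, move the MMU into the blocked,
+ * disabled state and gate its clock. "nocount" means the caller handles
+ * the activation reference counting and locking.
+ */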
+static void __sysmmu_disable_nocount(struct sysmmu_drvdata *drvdata)
+{
+ writel_relaxed(0, drvdata->sfrbase + REG_MMU_CFG);
+ writel_relaxed(CTRL_BLOCK_DISABLE, drvdata->sfrbase + REG_MMU_CTRL);
+ BUG_ON(readl_relaxed(drvdata->sfrbase + REG_MMU_CTRL) != CTRL_BLOCK_DISABLE);
+
+ clk_disable(drvdata->clk);
+}
+
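+/*
+ * Reference-counted disable: the hardware is only touched once the last
+ * activation reference is dropped, and even then only if the SysMMU is
+ * currently runtime-active.
+ */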
static bool __sysmmu_disable(struct sysmmu_drvdata *drvdata)
{
- /* DUMMY */
- return true;
+ bool disabled;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drvdata->lock, flags);
+
+ disabled = set_sysmmu_inactive(drvdata);
+
+ if (disabled) {
+ drvdata->pgtable = 0;
+
+ if (is_sysmmu_runtime_active(drvdata))
+ __sysmmu_disable_nocount(drvdata);
+
+ dev_dbg(drvdata->sysmmu, "Disabled\n");
+ } else {
+ dev_dbg(drvdata->sysmmu, "%d times left to disable\n",
+ drvdata->activations);
+ }
+
+ spin_unlock_irqrestore(&drvdata->lock, flags);
+
+ return disabled;
}
static void sysmmu_disable_from_master(struct device *master)
spin_unlock_irqrestore(&owner->lock, flags);
}
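+/*
+ * One-time configuration applied on every hardware enable: block the MMU,
+ * run the per-version feature setup (the v7 path is still a TODO), then
+ * turn on the first-level page-table descriptor cache and automatic clock
+ * gating while preserving the REG_MMU_CFG bits outside CFG_MASK.
+ */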
+static void __sysmmu_init_config(struct sysmmu_drvdata *drvdata)
+{
+ u32 cfg;
+
+ writel_relaxed(CTRL_BLOCK, drvdata->sfrbase + REG_MMU_CTRL);
+
+ if (MMU_MAJ_VER(drvdata->version) >= 7) {
+ /* TODO: implement later */
+ } else {
+ __exynos_sysmmu_set_prefbuf_axi_id(drvdata);
+ if (has_sysmmu_set_associative_tlb(drvdata->sfrbase))
+ __sysmmu_set_tlb_line_size(drvdata->sfrbase);
+ }
+
+ cfg = CFG_FLPDCACHE | CFG_ACGEN;
+ cfg |= readl_relaxed(drvdata->sfrbase + REG_MMU_CFG) & ~CFG_MASK;
+ writel_relaxed(cfg, drvdata->sfrbase + REG_MMU_CFG);
+}
+
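+/*
+ * Low-level enable sequence: ungate the clock, program the configuration
+ * and page-table base (which also flushes the TLB), then start translation
+ * with CTRL_ENABLE. The final write is a non-relaxed writel() so earlier
+ * page-table writes in memory are ordered before the MMU goes live.
+ */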
+static void __sysmmu_enable_nocount(struct sysmmu_drvdata *drvdata)
+{
+ clk_enable(drvdata->clk);
+
+ __sysmmu_init_config(drvdata);
+
+ __sysmmu_set_ptbase(drvdata->sfrbase, drvdata->pgtable / PAGE_SIZE);
+
+ writel(CTRL_ENABLE, drvdata->sfrbase + REG_MMU_CTRL);
+}
+
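+/*
+ * Reference-counted enable. Returns 0 when this call actually enabled the
+ * hardware, 1 when the SysMMU was already active with the same page table,
+ * and -EBUSY when it is already active with a different page table.
+ */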
static int __sysmmu_enable(struct sysmmu_drvdata *drvdata, phys_addr_t pgtable)
{
- /* DUMMY */
- return 0;
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drvdata->lock, flags);
+ if (set_sysmmu_active(drvdata)) {
+ drvdata->pgtable = pgtable;
+
+ if (is_sysmmu_runtime_active(drvdata))
+ __sysmmu_enable_nocount(drvdata);
+
+ dev_dbg(drvdata->sysmmu, "Enabled\n");
+ } else {
+ ret = (pgtable == drvdata->pgtable) ? 1 : -EBUSY;
+
+ dev_dbg(drvdata->sysmmu, "already enabled\n");
+ }
+
+ if (WARN_ON(ret < 0))
+ set_sysmmu_inactive(drvdata); /* decrement count */
+
+ spin_unlock_irqrestore(&drvdata->lock, flags);
+
+ return ret;
}
static int sysmmu_enable_from_master(struct device *master,
#define REG_MMU_STATUS 0x008
#define REG_MMU_VERSION 0x034
+#define CTRL_ENABLE 0x5
+#define CTRL_BLOCK 0x7
+#define CTRL_DISABLE 0x0
+#define CTRL_BLOCK_DISABLE 0x3
+
+#define CFG_MASK 0x01101FBC /* Selects bits 24, 20, 12-7 and 5-2 */
+#define CFG_ACGEN (1 << 24)
+#define CFG_FLPDCACHE (1 << 20)
+
+#define REG_PT_BASE_PPN 0x00C
+#define REG_MMU_FLUSH 0x010
+#define REG_MMU_FLUSH_ENTRY 0x014
+#define REG_MMU_FLUSH_RANGE 0x018
+#define REG_FLUSH_RANGE_START 0x020
+#define REG_FLUSH_RANGE_END 0x024
+#define REG_MMU_CAPA 0x030
+#define REG_MMU_CAPA_1 0x038
+#define REG_INT_STATUS 0x060
+#define REG_INT_CLEAR 0x064
+
+#define REG_L2TLB_CFG 0x200
+
+#define MMU_HAVE_PB(reg) (!!(((reg) >> 20) & 0xF))
+#define MMU_IS_TLB_CONFIGURABLE(reg) (!!(((reg) >> 16) & 0xFF))
+
+#define MMU_MASK_LINE_SIZE (0x7 << 4)
+#define MMU_DEFAULT_LINE_SIZE (0x2 << 4)
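+/*
+ * MMU_HAVE_PB and MMU_IS_TLB_CONFIGURABLE decode REG_MMU_CAPA[23:20] and
+ * REG_MMU_CAPA_1[23:16]: a non-zero field means prefetch buffers or a
+ * configurable TLB are present. The REG_L2TLB_CFG line-size field is
+ * assumed to live at bits [6:4], matching MMU_DEFAULT_LINE_SIZE.
+ */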
+
#define MMU_MAJ_VER(val) ((val) >> 11)
#define MMU_MIN_VER(val) ((val >> 4) & 0x7F)
#define MMU_REV_VER(val) ((val) & 0xF)
void exynos_iommu_unmap_userptr(struct iommu_domain *dom,
dma_addr_t iova, size_t size);
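+/*
+ * Two reference counts guard the hardware state: "activations" counts
+ * masters that requested translation, "runtime_active" tracks runtime PM.
+ * The SFRs are only touched when a count crosses the 0/1 boundary, which
+ * is what the helpers below encode.
+ */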
+static inline bool get_sysmmu_runtime_active(struct sysmmu_drvdata *data)
+{
+ return ++data->runtime_active == 1;
+}
+
+static inline bool put_sysmmu_runtime_active(struct sysmmu_drvdata *data)
+{
+ BUG_ON(data->runtime_active < 1);
+ return --data->runtime_active == 0;
+}
+
+static inline bool is_sysmmu_runtime_active(struct sysmmu_drvdata *data)
+{
+ return data->runtime_active > 0;
+}
+
+static inline bool set_sysmmu_active(struct sysmmu_drvdata *data)
+{
+ /*
+  * Return true if the System MMU was not active previously and it
+  * needs to be initialized.
+  */
+ return ++data->activations == 1;
+}
+
+static inline bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
+{
+ /* Return true if the System MMU now needs to be disabled */
+ BUG_ON(data->activations < 1);
+ return --data->activations == 0;
+}
+
+static inline bool is_sysmmu_active(struct sysmmu_drvdata *data)
+{
+ return data->activations > 0;
+}
+
#if defined(CONFIG_EXYNOS_IOVMM)
static inline struct exynos_iovmm *exynos_get_iovmm(struct device *dev)
{