From 61128f08fc110fc27cd50f5a8ca57c6a8c3bcddc Mon Sep 17 00:00:00 2001
From: Cho KyongHo
Date: Mon, 12 May 2014 11:44:47 +0530
Subject: [PATCH] iommu/exynos: Change error handling when page table update fails

This patch changes the page table update code not to panic on any error.
Instead, it prints an error message with a call stack.

Signed-off-by: Cho KyongHo
Signed-off-by: Shaik Ameer Basha
Signed-off-by: Joerg Roedel
---
 drivers/iommu/exynos-iommu.c | 58 +++++++++++++++++++++++++++---------
 1 file changed, 44 insertions(+), 14 deletions(-)

diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 8d7c3f9632f8..aec7fd75f400 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -728,13 +728,18 @@ finish:
 static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
                                        short *pgcounter)
 {
+        if (lv1ent_section(sent)) {
+                WARN(1, "Trying mapping on %#08lx mapped with 1MiB page", iova);
+                return ERR_PTR(-EADDRINUSE);
+        }
+
         if (lv1ent_fault(sent)) {
                 unsigned long *pent;

                 pent = kzalloc(LV2TABLE_SIZE, GFP_ATOMIC);
                 BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
                 if (!pent)
-                        return NULL;
+                        return ERR_PTR(-ENOMEM);

                 *sent = mk_lv1ent_page(virt_to_phys(pent));
                 *pgcounter = NUM_LV2ENTRIES;
@@ -745,14 +750,21 @@ static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
         return page_entry(sent, iova);
 }

-static int lv1set_section(unsigned long *sent, phys_addr_t paddr, short *pgcnt)
+static int lv1set_section(unsigned long *sent, unsigned long iova,
+                          phys_addr_t paddr, short *pgcnt)
 {
-        if (lv1ent_section(sent))
+        if (lv1ent_section(sent)) {
+                WARN(1, "Trying mapping on 1MiB@%#08lx that is mapped",
+                        iova);
                 return -EADDRINUSE;
+        }

         if (lv1ent_page(sent)) {
-                if (*pgcnt != NUM_LV2ENTRIES)
+                if (*pgcnt != NUM_LV2ENTRIES) {
+                        WARN(1, "Trying mapping on 1MiB@%#08lx that is mapped",
+                                iova);
                         return -EADDRINUSE;
+                }

                 kfree(page_entry(sent, 0));

@@ -770,8 +782,10 @@ static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
                        short *pgcnt)
 {
         if (size == SPAGE_SIZE) {
-                if (!lv2ent_fault(pent))
+                if (!lv2ent_fault(pent)) {
+                        WARN(1, "Trying mapping on 4KiB where mapping exists");
                         return -EADDRINUSE;
+                }

                 *pent = mk_lv2ent_spage(paddr);
                 pgtable_flush(pent, pent + 1);
@@ -780,7 +794,10 @@ static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
                 int i;
                 for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
                         if (!lv2ent_fault(pent)) {
-                                memset(pent, 0, sizeof(*pent) * i);
+                                WARN(1,
+                                "Trying mapping on 64KiB where mapping exists");
+                                if (i > 0)
+                                        memset(pent - i, 0, sizeof(*pent) * i);
                                 return -EADDRINUSE;
                         }

@@ -808,7 +825,7 @@ static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
         entry = section_entry(priv->pgtable, iova);

         if (size == SECT_SIZE) {
-                ret = lv1set_section(entry, paddr,
+                ret = lv1set_section(entry, iova, paddr,
                                 &priv->lv2entcnt[lv1ent_offset(iova)]);
         } else {
                 unsigned long *pent;
@@ -816,17 +833,16 @@ static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
                 pent = alloc_lv2entry(entry, iova,
                                 &priv->lv2entcnt[lv1ent_offset(iova)]);

-                if (!pent)
-                        ret = -ENOMEM;
+                if (IS_ERR(pent))
+                        ret = PTR_ERR(pent);
                 else
                         ret = lv2set_page(pent, paddr, size,
                                 &priv->lv2entcnt[lv1ent_offset(iova)]);
         }

-        if (ret) {
+        if (ret)
                 pr_debug("%s: Failed to map iova 0x%lx/0x%x bytes\n",
                         __func__, iova, size);
-        }

         spin_unlock_irqrestore(&priv->pgtablelock, flags);

@@ -840,6 +856,7 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain,
         struct sysmmu_drvdata *data;
         unsigned long flags;
         unsigned long *ent;
+        size_t err_pgsize;

         BUG_ON(priv->pgtable == NULL);

@@ -848,7 +865,10 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain,
         ent = section_entry(priv->pgtable, iova);

         if (lv1ent_section(ent)) {
-                BUG_ON(size < SECT_SIZE);
+                if (size < SECT_SIZE) {
+                        err_pgsize = SECT_SIZE;
+                        goto err;
+                }

                 *ent = 0;
                 pgtable_flush(ent, ent + 1);
@@ -879,7 +899,10 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain,
         }

         /* lv1ent_large(ent) == true here */
-        BUG_ON(size < LPAGE_SIZE);
+        if (size < LPAGE_SIZE) {
+                err_pgsize = LPAGE_SIZE;
+                goto err;
+        }

         memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);

@@ -893,8 +916,15 @@ done:
                 sysmmu_tlb_invalidate_entry(data->dev, iova);
         spin_unlock_irqrestore(&priv->lock, flags);

-
         return size;
+err:
+        spin_unlock_irqrestore(&priv->pgtablelock, flags);
+
+        WARN(1,
+        "%s: Failed due to size(%#x) @ %#08lx is smaller than page size %#x\n",
+        __func__, size, iova, err_pgsize);
+
+        return 0;
 }

 static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
-- 
2.20.1
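
For readers unfamiliar with the error-pointer convention this patch switches to, the standalone sketch below shows the same pattern outside the kernel: an allocation helper warns and returns an encoded error pointer instead of returning NULL or panicking, and the caller decodes it with IS_ERR()/PTR_ERR(), as alloc_lv2entry() and exynos_iommu_map() do after this change. The ERR_PTR()/IS_ERR()/PTR_ERR() definitions here are simplified userspace stand-ins for the kernel's <linux/err.h> helpers, and alloc_table() is a made-up example, not part of the driver.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified userspace stand-ins for the kernel's <linux/err.h> helpers:
 * an errno value is encoded into the top of the pointer range so a single
 * return value can carry either a valid pointer or an error code. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long error) { return (void *)(intptr_t)error; }
static inline long PTR_ERR(const void *ptr) { return (long)(intptr_t)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Hypothetical allocation helper following the same policy as
 * alloc_lv2entry() after this patch: warn and return a distinct error
 * pointer for each failure instead of returning NULL or panicking. */
static unsigned long *alloc_table(int already_mapped)
{
        unsigned long *table;

        if (already_mapped) {
                fprintf(stderr, "WARN: trying to map an already mapped address\n");
                return ERR_PTR(-EADDRINUSE);
        }

        table = calloc(256, sizeof(*table));
        if (!table)
                return ERR_PTR(-ENOMEM);

        return table;
}

int main(void)
{
        unsigned long *table = alloc_table(1);  /* force the EADDRINUSE path */

        if (IS_ERR(table)) {
                /* Decode the error the way exynos_iommu_map() does with
                 * PTR_ERR(pent); this prints the negative EADDRINUSE value. */
                printf("mapping failed: error %ld\n", PTR_ERR(table));
                return 1;
        }

        free(table);
        return 0;
}

Returning an error pointer rather than NULL lets the caller distinguish -ENOMEM from -EADDRINUSE and propagate the precise error code, which is why exynos_iommu_map() now tests IS_ERR(pent) instead of !pent.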