AMD IOMMU: move TLB flushing to the map/unmap helper functions
Author:     Joerg Roedel <joerg.roedel@amd.com>
AuthorDate: Thu, 4 Sep 2008 13:49:46 +0000 (15:49 +0200)
Commit:     Ingo Molnar <mingo@elte.hu>
CommitDate: Fri, 19 Sep 2008 10:59:04 +0000 (12:59 +0200)
This patch moves the invocation of the TLB flushing functions into the
__map_single/__unmap_single helpers, because it is code common to all
dma_ops-relevant mapping/unmapping paths.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
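
For context, below is a minimal standalone sketch of the pattern this patch
applies: the flush moves out of the individual dma_ops entry points
(map_single, unmap_single, map_sg, unmap_sg, alloc_coherent, free_coherent)
and into the shared __map_single()/__unmap_single() helpers. The struct
definitions here are stubs invented for illustration, not the real kernel
types; only the helper names and the control flow are taken from the patch.

#include <stdio.h>
#include <stdbool.h>

/* Stub types for illustration only; the real kernel structures differ. */
struct amd_iommu  { bool npcache; };
struct dma_domain { int id; };

static bool iommu_has_npcache(struct amd_iommu *iommu)
{
	return iommu->npcache;
}

static void iommu_flush_pages(struct amd_iommu *iommu, int domid,
			      unsigned long addr, size_t size)
{
	printf("flush domain %d: addr=%#lx size=%zu\n", domid, addr, size);
}

/* After the patch the flush lives in the shared helpers ... */
static unsigned long __map_single(struct amd_iommu *iommu,
				  struct dma_domain *dom,
				  unsigned long paddr, size_t size)
{
	unsigned long address = paddr;	/* stub: real code allocates an IOVA */

	/* Only hardware that caches non-present entries needs this flush. */
	if (iommu_has_npcache(iommu))
		iommu_flush_pages(iommu, dom->id, address, size);

	return address;
}

static void __unmap_single(struct amd_iommu *iommu, struct dma_domain *dom,
			   unsigned long dma_addr, size_t size)
{
	/* stub: real code clears the page-table entries first */

	/* Unmapping always flushes: the old translation is now stale. */
	iommu_flush_pages(iommu, dom->id, dma_addr, size);
}

/* ... so entry points like map_single() no longer flush themselves. */
static unsigned long map_single(struct amd_iommu *iommu,
				struct dma_domain *dom,
				unsigned long paddr, size_t size)
{
	return __map_single(iommu, dom, paddr, size);
}

int main(void)
{
	struct amd_iommu iommu = { .npcache = true };
	struct dma_domain dom = { .id = 1 };

	unsigned long addr = map_single(&iommu, &dom, 0x1000, 4096);
	__unmap_single(&iommu, &dom, addr, 4096);
	return 0;
}

Note the asymmetry the diff preserves: the map-side flush stays behind the
iommu_has_npcache() check (and gains an unlikely() hint, since caching of
non-present entries is the uncommon case), while the unmap side flushes
unconditionally because the IOMMU may still hold the now-stale translation.
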
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 695e0fc41b108389242ceb41fbf872af004f75df..691e023695ad07e1ddda205e0d09c2678be8a0a2 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -795,6 +795,9 @@ static dma_addr_t __map_single(struct device *dev,
        }
        address += offset;
 
+       if (unlikely(iommu_has_npcache(iommu)))
+               iommu_flush_pages(iommu, dma_dom->domain.id, address, size);
+
 out:
        return address;
 }
@@ -825,6 +828,8 @@ static void __unmap_single(struct amd_iommu *iommu,
        }
 
        dma_ops_free_addresses(dma_dom, dma_addr, pages);
+
+       iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
 }
 
 /*
@@ -853,9 +858,6 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
        if (addr == bad_dma_address)
                goto out;
 
-       if (iommu_has_npcache(iommu))
-               iommu_flush_pages(iommu, domain->id, addr, size);
-
        if (iommu->need_sync)
                iommu_completion_wait(iommu);
 
@@ -885,8 +887,6 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
 
        __unmap_single(iommu, domain->priv, dma_addr, size, dir);
 
-       iommu_flush_pages(iommu, domain->id, dma_addr, size);
-
        if (iommu->need_sync)
                iommu_completion_wait(iommu);
 
@@ -948,9 +948,6 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
                        mapped_elems++;
                } else
                        goto unmap;
-               if (iommu_has_npcache(iommu))
-                       iommu_flush_pages(iommu, domain->id, s->dma_address,
-                                         s->dma_length);
        }
 
        if (iommu->need_sync)
@@ -996,8 +993,6 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
        for_each_sg(sglist, s, nelems, i) {
                __unmap_single(iommu, domain->priv, s->dma_address,
                               s->dma_length, dir);
-               iommu_flush_pages(iommu, domain->id, s->dma_address,
-                                 s->dma_length);
                s->dma_address = s->dma_length = 0;
        }
 
@@ -1048,9 +1043,6 @@ static void *alloc_coherent(struct device *dev, size_t size,
                goto out;
        }
 
-       if (iommu_has_npcache(iommu))
-               iommu_flush_pages(iommu, domain->id, *dma_addr, size);
-
        if (iommu->need_sync)
                iommu_completion_wait(iommu);
 
@@ -1082,7 +1074,6 @@ static void free_coherent(struct device *dev, size_t size,
        spin_lock_irqsave(&domain->lock, flags);
 
        __unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
-       iommu_flush_pages(iommu, domain->id, dma_addr, size);
 
        if (iommu->need_sync)
                iommu_completion_wait(iommu);