AMD IOMMU: fix possible race while accessing iommu->need_sync
author     Joerg Roedel <joerg.roedel@amd.com>
           Wed, 3 Dec 2008 11:19:27 +0000 (12:19 +0100)
committer  Joerg Roedel <joerg.roedel@amd.com>
           Wed, 3 Dec 2008 11:20:46 +0000 (12:20 +0100)
Access to the iommu->need_sync member must be protected by
iommu->lock; otherwise a race condition is possible. Fix this by
setting and testing the flag only while the lock is held.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
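
To illustrate the locking pattern the patch establishes, here is a
minimal userspace sketch. It is not the kernel code: a pthread mutex
stands in for the iommu->lock spinlock, the names fake_iommu,
queue_command and completion_wait are invented for the example, and the
actual command queueing is stubbed out. The point is that the flag is
set inside the same critical section that queues a command, and
tested-and-cleared inside the same critical section that issues the
completion wait, so no window remains between the test and the clear.

    /*
     * Illustrative sketch only; a pthread mutex stands in for the
     * iommu->lock spinlock, and command queueing is stubbed out.
     */
    #include <pthread.h>
    #include <stdbool.h>

    struct fake_iommu {             /* stand-in for struct amd_iommu */
            pthread_mutex_t lock;   /* stands in for iommu->lock */
            bool need_sync;         /* stands in for iommu->need_sync */
    };

    /* Queue a command and, on success, flag that a completion wait is
     * needed. Setting the flag inside the same critical section that
     * queues the command keeps the two steps atomic with respect to
     * completion_wait() below. */
    static int queue_command(struct fake_iommu *iommu)
    {
            int ret;

            pthread_mutex_lock(&iommu->lock);
            ret = 0;        /* pretend __iommu_queue_command() succeeded */
            if (!ret)
                    iommu->need_sync = true;
            pthread_mutex_unlock(&iommu->lock);

            return ret;
    }

    /* Wait for queued commands, but only if any were queued. Testing
     * and clearing the flag under the lock closes the window in which
     * another CPU could set it between the (formerly unlocked) clear
     * and the queueing of the wait command. */
    static int completion_wait(struct fake_iommu *iommu)
    {
            int ret = 0;

            pthread_mutex_lock(&iommu->lock);
            if (!iommu->need_sync)
                    goto out;
            iommu->need_sync = false;
            /* ...queue the COMPL_WAIT command and poll for it here... */
    out:
            pthread_mutex_unlock(&iommu->lock);
            return ret;
    }

    int main(void)
    {
            struct fake_iommu iommu = { PTHREAD_MUTEX_INITIALIZER, false };

            queue_command(&iommu);
            return completion_wait(&iommu);
    }

A side effect visible in the diff below: callers no longer need the
unlikely(iommu->need_sync) test before calling iommu_completion_wait(),
because the function itself now performs the test under the lock and
returns immediately when there is nothing to wait for.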
arch/x86/kernel/amd_iommu.c

index a232e5a85d489914b85cccde72e3f07ad6d56aaa..5662e226b0c922c1d2622e9173687bd6285b3e52 100644 (file)
@@ -187,6 +187,8 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 
        spin_lock_irqsave(&iommu->lock, flags);
        ret = __iommu_queue_command(iommu, cmd);
+       if (!ret)
+               iommu->need_sync = 1;
        spin_unlock_irqrestore(&iommu->lock, flags);
 
        return ret;
@@ -210,10 +212,13 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
        cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
        CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
 
-       iommu->need_sync = 0;
-
        spin_lock_irqsave(&iommu->lock, flags);
 
+       if (!iommu->need_sync)
+               goto out;
+
+       iommu->need_sync = 0;
+
        ret = __iommu_queue_command(iommu, &cmd);
 
        if (ret)
@@ -254,8 +259,6 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
 
        ret = iommu_queue_command(iommu, &cmd);
 
-       iommu->need_sync = 1;
-
        return ret;
 }
 
@@ -281,8 +284,6 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
 
        ret = iommu_queue_command(iommu, &cmd);
 
-       iommu->need_sync = 1;
-
        return ret;
 }
 
@@ -762,8 +763,6 @@ static void set_device_domain(struct amd_iommu *iommu,
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
        iommu_queue_inv_dev_entry(iommu, devid);
-
-       iommu->need_sync = 1;
 }
 
 /*****************************************************************************
@@ -1034,8 +1033,7 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
        if (addr == bad_dma_address)
                goto out;
 
-       if (unlikely(iommu->need_sync))
-               iommu_completion_wait(iommu);
+       iommu_completion_wait(iommu);
 
 out:
        spin_unlock_irqrestore(&domain->lock, flags);
@@ -1063,8 +1061,7 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
 
        __unmap_single(iommu, domain->priv, dma_addr, size, dir);
 
-       if (unlikely(iommu->need_sync))
-               iommu_completion_wait(iommu);
+       iommu_completion_wait(iommu);
 
        spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -1130,8 +1127,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
                        goto unmap;
        }
 
-       if (unlikely(iommu->need_sync))
-               iommu_completion_wait(iommu);
+       iommu_completion_wait(iommu);
 
 out:
        spin_unlock_irqrestore(&domain->lock, flags);
@@ -1176,8 +1172,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
                s->dma_address = s->dma_length = 0;
        }
 
-       if (unlikely(iommu->need_sync))
-               iommu_completion_wait(iommu);
+       iommu_completion_wait(iommu);
 
        spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -1228,8 +1223,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
                goto out;
        }
 
-       if (unlikely(iommu->need_sync))
-               iommu_completion_wait(iommu);
+       iommu_completion_wait(iommu);
 
 out:
        spin_unlock_irqrestore(&domain->lock, flags);
@@ -1260,8 +1254,7 @@ static void free_coherent(struct device *dev, size_t size,
 
        __unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
 
-       if (unlikely(iommu->need_sync))
-               iommu_completion_wait(iommu);
+       iommu_completion_wait(iommu);
 
        spin_unlock_irqrestore(&domain->lock, flags);