iommu/amd: Optimize dma_ops_free_addresses
author Joerg Roedel <jroedel@suse.de>
Tue, 22 Dec 2015 11:15:35 +0000 (12:15 +0100)
committer Joerg Roedel <jroedel@suse.de>
Mon, 28 Dec 2015 16:18:53 +0000 (17:18 +0100)
Don't flush the IOMMU TLB when we free a range that ends beyond
the current next_bit pointer. Advance the next_bit pointer past
the freed range instead and let the flush happen on the next
wraparound in the allocation path.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
drivers/iommu/amd_iommu.c

index 39a2048a6cd25948f82490ee5366665a5d25aece..c657e48f0aed0d8c1b6417689f2ecba7d8f4b8cb 100644
@@ -1633,8 +1633,7 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
                return;
 #endif
 
-       if (amd_iommu_unmap_flush ||
-           (address + pages > range->next_bit)) {
+       if (amd_iommu_unmap_flush) {
                domain_flush_tlb(&dom->domain);
                domain_flush_complete(&dom->domain);
        }
@@ -1642,6 +1641,8 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
        address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
 
        spin_lock_irqsave(&range->bitmap_lock, flags);
+       if (address + pages > range->next_bit)
+               range->next_bit = address + pages;
        bitmap_clear(range->bitmap, address, pages);
        spin_unlock_irqrestore(&range->bitmap_lock, flags);
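
To see the whole pattern in one place, below is a minimal, self-contained C sketch of the idea the patch relies on: the free path never flushes, it only bumps next_bit past the freed range, and the allocation path flushes the TLB once per wraparound before rescanning from the start. The names (struct addr_range, range_alloc, range_free, tlb_flush) and the plain byte array standing in for the kernel's bitmap and bitmap_lock are illustrative assumptions, not the actual amd_iommu.c interfaces.

#include <stdio.h>
#include <string.h>

#define RANGE_PAGES 64U                  /* pages tracked by one range */

struct addr_range {
        unsigned char used[RANGE_PAGES]; /* stand-in for range->bitmap */
        unsigned int  next_bit;          /* allocation cursor          */
};

/* Stand-in for domain_flush_tlb() + domain_flush_complete(). */
static void tlb_flush(void)
{
        printf("TLB flush\n");
}

/*
 * Allocation path: scan forward from next_bit; only on wraparound
 * flush the TLB once, so every range freed since the last flush
 * becomes safe to hand out again.
 */
static int range_alloc(struct addr_range *r, unsigned int pages)
{
        for (int pass = 0; pass < 2; pass++) {
                for (unsigned int bit = r->next_bit;
                     bit + pages <= RANGE_PAGES; bit++) {
                        unsigned int i;

                        for (i = 0; i < pages && !r->used[bit + i]; i++)
                                ;
                        if (i == pages) {
                                memset(&r->used[bit], 1, pages);
                                r->next_bit = bit + pages;
                                return (int)bit;
                        }
                }
                if (pass == 0) {
                        /* Wraparound: flush once, then rescan from 0. */
                        r->next_bit = 0;
                        tlb_flush();
                }
        }
        return -1;
}

/*
 * Free path: no flush.  Advancing next_bit past the freed range keeps
 * the allocator from reusing it before the next wraparound flush,
 * mirroring what the second hunk above adds under bitmap_lock.
 */
static void range_free(struct addr_range *r, unsigned int address, unsigned int pages)
{
        if (address + pages > r->next_bit)
                r->next_bit = address + pages;
        memset(&r->used[address], 0, pages);
}

int main(void)
{
        struct addr_range r;
        int blocks[RANGE_PAGES / 8];

        memset(&r, 0, sizeof(r));

        /* Fill the range with 8-page blocks; next_bit ends up at 64. */
        for (unsigned int i = 0; i < RANGE_PAGES / 8; i++)
                blocks[i] = range_alloc(&r, 8);

        /* Free the first block: it ends behind next_bit, nothing to do. */
        range_free(&r, (unsigned int)blocks[0], 8);

        /* This allocation wraps around, flushes once and reuses it. */
        printf("reallocated block at page %d\n", range_alloc(&r, 8));

        /*
         * Free a block that ends beyond next_bit (now 8): next_bit is
         * bumped past it instead of flushing the TLB right away.
         */
        range_free(&r, (unsigned int)blocks[5], 8);
        printf("next_bit is now %u\n", r.next_bit);

        return 0;
}

Run as-is, the sketch prints a single "TLB flush" when the allocator wraps, even though two ranges were freed in between, which is the behaviour this patch gives dma_ops_free_addresses() whenever amd_iommu_unmap_flush is not set.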