From: Joerg Roedel Date: Tue, 22 Dec 2015 11:15:35 +0000 (+0100) Subject: iommu/amd: Optimize dma_ops_free_addresses X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=4eeca8c5e72fad752eba9efc293c924d65faa86e;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git iommu/amd: Optimize dma_ops_free_addresses Don't flush the IOMMU TLB when we free something behind the current next_bit pointer. Update the next_bit pointer instead and let the flush happen on the next wraparound in the allocation path. Signed-off-by: Joerg Roedel --- diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 39a2048a6cd2..c657e48f0aed 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1633,8 +1633,7 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom, return; #endif - if (amd_iommu_unmap_flush || - (address + pages > range->next_bit)) { + if (amd_iommu_unmap_flush) { domain_flush_tlb(&dom->domain); domain_flush_complete(&dom->domain); } @@ -1642,6 +1641,8 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom, address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT; spin_lock_irqsave(&range->bitmap_lock, flags); + if (address + pages > range->next_bit) + range->next_bit = address + pages; bitmap_clear(range->bitmap, address, pages); spin_unlock_irqrestore(&range->bitmap_lock, flags);