From: Sebastian Ott
Date: Thu, 8 Sep 2016 11:44:57 +0000 (+0200)
Subject: s390/pci_dma: make lazy flush independent from the tlb_refresh bit
X-Git-Tag: MMI-PSA29.97-13-9~6596^2~36
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=4f5359e94bbfbe349fd1ae00516dfe749d53fe22;p=GitHub%2FMotorolaMobilityLLC%2Fkernel-slsi.git

s390/pci_dma: make lazy flush independent from the tlb_refresh bit

We have two strategies to reduce the number of RPCIT instructions:

* A HW feature indicated via the tlb_refresh bit allows us to omit
  RPCIT for invalid -> valid translation-table entry updates.

* With "lazy flush" we omit RPCIT for valid -> invalid updates until
  we run out of DMA addresses. When we have to reuse DMA addresses we
  issue a global TLB flush using only one RPCIT instruction.

Currently lazy flushing depends on tlb_refresh. Since there is no
technical reason for this dependency, remove it.

Signed-off-by: Sebastian Ott
Reviewed-by: Gerald Schaefer
Signed-off-by: Martin Schwidefsky
---

diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 47f4afbff0a5..0c626c1303cb 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -181,14 +181,17 @@ static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
 	/*
 	 * With zdev->tlb_refresh == 0, rpcit is not required to establish new
 	 * translations when previously invalid translation-table entries are
-	 * validated. With lazy unmap, it also is skipped for previously valid
+	 * validated. With lazy unmap, rpcit is skipped for previously valid
 	 * entries, but a global rpcit is then required before any address can
 	 * be re-used, i.e. after each iommu bitmap wrap-around.
 	 */
-	if (!zdev->tlb_refresh &&
-	    (!s390_iommu_strict ||
-	     ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)))
-		return 0;
+	if ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID) {
+		if (!zdev->tlb_refresh)
+			return 0;
+	} else {
+		if (!s390_iommu_strict)
+			return 0;
+	}

 	return zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
 				  PAGE_ALIGN(size));
@@ -257,7 +260,7 @@ static dma_addr_t dma_alloc_address(struct device *dev, int size)
 	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
 	offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
 	if (offset == -1) {
-		if (!zdev->tlb_refresh && !s390_iommu_strict) {
+		if (!s390_iommu_strict) {
 			/* global flush before DMA addresses are reused */
 			if (zpci_refresh_global(zdev))
 				goto out_error;
@@ -292,7 +295,7 @@ static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
 	if (!zdev->iommu_bitmap)
 		goto out;

-	if (zdev->tlb_refresh || s390_iommu_strict)
+	if (s390_iommu_strict)
 		bitmap_clear(zdev->iommu_bitmap, offset, size);
 	else
 		bitmap_set(zdev->lazy_bitmap, offset, size);
@@ -565,7 +568,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
 		rc = -ENOMEM;
 		goto free_dma_table;
 	}
-	if (!zdev->tlb_refresh && !s390_iommu_strict) {
+	if (!s390_iommu_strict) {
 		zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8);
 		if (!zdev->lazy_bitmap) {
 			rc = -ENOMEM;
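
For illustration, below is a standalone userspace sketch of how the two
strategies interact after this patch. It is a simplified, hypothetical
model, not kernel code: tlb_refresh, s390_iommu_strict, iommu_bitmap,
lazy_bitmap and refresh_global() merely mirror the names used in the
patch. rpcit_needed() plays the role of the reworked __dma_purge_tlb()
check, and the wrap-around path in alloc_dma_page() plays the role of
the global flush in dma_alloc_address().

/*
 * Hypothetical userspace model of the two RPCIT-avoidance strategies;
 * names mirror the patch, but none of this is kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define IOMMU_PAGES 8			/* tiny address space for the demo */

static bool tlb_refresh;		/* HW needs RPCIT for new translations */
static bool s390_iommu_strict;		/* strict mode disables lazy flush */
static bool iommu_bitmap[IOMMU_PAGES];	/* addresses currently handed out */
static bool lazy_bitmap[IOMMU_PAGES];	/* freed, but TLB flush still pending */

/*
 * Mirrors the reworked __dma_purge_tlb() decision: the two strategies
 * are now tested independently of each other.
 */
static bool rpcit_needed(bool makes_entry_valid)
{
	if (makes_entry_valid)		/* invalid -> valid update */
		return tlb_refresh;
	return s390_iommu_strict;	/* valid -> invalid update */
}

/*
 * Stand-in for zpci_refresh_global(): a single flush makes all lazily
 * freed addresses reusable again.
 */
static void refresh_global(void)
{
	printf("global flush (one RPCIT)\n");
	for (int i = 0; i < IOMMU_PAGES; i++)
		if (lazy_bitmap[i])
			iommu_bitmap[i] = lazy_bitmap[i] = false;
}

static int alloc_dma_page(void)
{
	for (int pass = 0; pass < 2; pass++) {
		for (int i = 0; i < IOMMU_PAGES; i++) {
			if (!iommu_bitmap[i]) {
				iommu_bitmap[i] = true;
				if (rpcit_needed(true))
					printf("RPCIT for page %d (map)\n", i);
				return i;
			}
		}
		/* out of addresses: flush once, then retry (the "iommu
		 * bitmap wrap-around" case from the patch) */
		if (s390_iommu_strict || pass == 1)
			break;
		refresh_global();
	}
	return -1;
}

static void free_dma_page(int i)
{
	if (rpcit_needed(false)) {
		printf("RPCIT for page %d (unmap)\n", i);
		iommu_bitmap[i] = false;	/* reusable right away */
	} else {
		lazy_bitmap[i] = true;		/* defer to the global flush */
	}
}

int main(void)
{
	int p[IOMMU_PAGES];

	for (int round = 0; round < 2; round++) {
		for (int i = 0; i < IOMMU_PAGES; i++)
			p[i] = alloc_dma_page();
		for (int i = 0; i < IOMMU_PAGES; i++)
			free_dma_page(p[i]);
	}
	return 0;
}

With both flags clear, two full map/unmap rounds over the whole address
space are serviced by a single global flush. Setting s390_iommu_strict
instead issues one RPCIT per unmap, and setting tlb_refresh adds one per
map, which is exactly the saving the two strategies provide.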