From: Bjorn Helgaas
Date: Mon, 20 Jul 2015 14:10:36 +0000 (-0500)
Subject: iommu/vt-d: Cache PCI ATS state and Invalidate Queue Depth
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=fb0cc3aa552642631c3a4e83deae2b5c1a1ef4fa;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

iommu/vt-d: Cache PCI ATS state and Invalidate Queue Depth

We check the ATS state (enabled/disabled) and fetch the PCI ATS Invalidate
Queue Depth in performance-sensitive paths.  It's easy to cache these,
which removes dependencies on PCI.

Remember the ATS enabled state.  When enabling, read the queue depth once
and cache it in the device_domain_info struct.  This is similar to what
amd_iommu.c does.

Signed-off-by: Bjorn Helgaas
Reviewed-by: Joerg Roedel
Acked-by: Joerg Roedel
---

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a98a7b27aca1..c22a5490eea7 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -408,6 +408,10 @@ struct device_domain_info {
 	struct list_head global; /* link to global list */
 	u8 bus;			/* PCI bus number */
 	u8 devfn;		/* PCI devfn number */
+	struct {
+		u8 enabled:1;
+		u8 qdep;
+	} ats;			/* ATS state */
 	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
 	struct intel_iommu *iommu; /* IOMMU used by this device */
 	struct dmar_domain *domain; /* pointer to domain */
@@ -1391,19 +1395,26 @@ iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
 
 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
 {
+	struct pci_dev *pdev;
+
 	if (!info || !dev_is_pci(info->dev))
 		return;
 
-	pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
+	pdev = to_pci_dev(info->dev);
+	if (pci_enable_ats(pdev, VTD_PAGE_SHIFT))
+		return;
+
+	info->ats.enabled = 1;
+	info->ats.qdep = pci_ats_queue_depth(pdev);
 }
 
 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
 {
-	if (!info->dev || !dev_is_pci(info->dev) ||
-	    !pci_ats_enabled(to_pci_dev(info->dev)))
+	if (!info->ats.enabled)
 		return;
 
 	pci_disable_ats(to_pci_dev(info->dev));
+	info->ats.enabled = 0;
 }
 
 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
@@ -1415,16 +1426,11 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
 
 	spin_lock_irqsave(&device_domain_lock, flags);
 	list_for_each_entry(info, &domain->devices, link) {
-		struct pci_dev *pdev;
-		if (!info->dev || !dev_is_pci(info->dev))
-			continue;
-
-		pdev = to_pci_dev(info->dev);
-		if (!pci_ats_enabled(pdev))
+		if (!info->ats.enabled)
 			continue;
 
 		sid = info->bus << 8 | info->devfn;
-		qdep = pci_ats_queue_depth(pdev);
+		qdep = info->ats.qdep;
 		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
 	}
 	spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -2272,6 +2278,8 @@ static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
 
 	info->bus = bus;
 	info->devfn = devfn;
+	info->ats.enabled = 0;
+	info->ats.qdep = 0;
 	info->dev = dev;
 	info->domain = domain;
 	info->iommu = iommu;
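
A minimal standalone sketch of the caching pattern the patch applies, not part of the
patch itself: the hypothetical names my_dev_info, my_enable_dev_iotlb and
my_flush_dev_iotlb are illustrative only, and the sketch assumes the same kernel
helpers the patch uses (pci_enable_ats(), pci_ats_queue_depth(), qi_flush_dev_iotlb()
and VTD_PAGE_SHIFT). PCI config space is touched once at enable time; the
performance-sensitive flush path then reads only the cached fields.

/* Illustrative sketch only; hypothetical names, kernel context assumed. */
struct my_dev_info {
	struct pci_dev *pdev;
	struct intel_iommu *iommu;
	u16 sid;			/* bus << 8 | devfn */
	struct {
		u8 enabled:1;		/* cached "ATS is enabled" flag */
		u8 qdep;		/* cached invalidate queue depth */
	} ats;
};

/* Enable path: read PCI config space once and cache the results. */
static void my_enable_dev_iotlb(struct my_dev_info *info)
{
	if (pci_enable_ats(info->pdev, VTD_PAGE_SHIFT))
		return;			/* ATS unsupported or enable failed */

	info->ats.enabled = 1;
	info->ats.qdep = pci_ats_queue_depth(info->pdev);
}

/* Flush path: no PCI accessor calls, only the cached state. */
static void my_flush_dev_iotlb(struct my_dev_info *info, u64 addr, unsigned mask)
{
	if (!info->ats.enabled)
		return;

	qi_flush_dev_iotlb(info->iommu, info->sid, info->ats.qdep, addr, mask);
}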