}
+ iommu_flush_tlb_all(domain);
+
out:
iommu_put_resv_regions(dev, &mappings);
}
EXPORT_SYMBOL_GPL(iommu_map);
-size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
+static size_t __iommu_unmap(struct iommu_domain *domain,
+ unsigned long iova, size_t size,
+ bool sync)
{
+ const struct iommu_ops *ops = domain->ops;
size_t unmapped_page, unmapped = 0;
- unsigned int min_pagesz;
unsigned long orig_iova = iova;
+ unsigned int min_pagesz;
- if (unlikely(domain->ops->unmap == NULL ||
+ if (unlikely(ops->unmap == NULL ||
domain->pgsize_bitmap == 0UL))
return -ENODEV;
while (unmapped < size) {
size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
- unmapped_page = domain->ops->unmap(domain, iova, pgsize);
+ unmapped_page = ops->unmap(domain, iova, pgsize);
if (!unmapped_page)
break;
+ if (sync && ops->iotlb_range_add)
+ ops->iotlb_range_add(domain, iova, pgsize);
+
pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
iova, unmapped_page);
unmapped += unmapped_page;
}
+ if (sync && ops->iotlb_sync)
+ ops->iotlb_sync(domain);
+
trace_unmap(orig_iova, size, unmapped);
return unmapped;
}
+
+size_t iommu_unmap(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ return __iommu_unmap(domain, iova, size, true);
+}
EXPORT_SYMBOL_GPL(iommu_unmap);
+size_t iommu_unmap_fast(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ return __iommu_unmap(domain, iova, size, false);
+}
+EXPORT_SYMBOL_GPL(iommu_unmap_fast);
+
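
A caller-side sketch of the intended use of the new interface (the function teardown_range below is hypothetical, not part of this patch): iommu_unmap_fast() performs no TLB maintenance itself, so the caller queues the range and flushes once for the whole operation instead of paying for a flush on every unmap.

/*
 * Hypothetical caller-side sketch: tear down a range of mappings and
 * batch the TLB invalidation with the helpers added by this patch.
 */
static void teardown_range(struct iommu_domain *domain,
			   unsigned long iova, size_t size)
{
	size_t unmapped;

	/* No implicit TLB flush here; stale entries may remain visible. */
	unmapped = iommu_unmap_fast(domain, iova, size);

	/* Queue the unmapped range, then flush once for the whole batch. */
	iommu_tlb_range_add(domain, iova, unmapped);
	iommu_tlb_sync(domain);
}
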
size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot)
{
* @map: map a physically contiguous memory region to an iommu domain
* @unmap: unmap a physically contiguous memory region from an iommu domain
* @map_sg: map a scatter-gather list of physically contiguous memory chunks
* to an iommu domain
+ * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
+ * @iotlb_range_add: Add a given iova range to the flush queue for this domain
+ * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
+ * queue
* @iova_to_phys: translate iova to physical address
* @add_device: add device to iommu grouping
size_t size);
size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot);
+ void (*flush_iotlb_all)(struct iommu_domain *domain);
+ void (*iotlb_range_add)(struct iommu_domain *domain,
+ unsigned long iova, size_t size);
+ void (*iotlb_sync)(struct iommu_domain *domain);
phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
int (*add_device)(struct device *dev);
void (*remove_device)(struct device *dev);
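
A sketch of how a driver might wire up the three new callbacks (the mydrv_* names are hypothetical and only illustrate the split between queueing and flushing; real drivers fill in hardware-specific invalidation):

/* Hypothetical driver-side sketch, not part of this patch. */
static void mydrv_flush_iotlb_all(struct iommu_domain *domain)
{
	/* Issue a full-domain TLB invalidation and wait for completion. */
}

static void mydrv_iotlb_range_add(struct iommu_domain *domain,
				  unsigned long iova, size_t size)
{
	/* Record the range; actual invalidation is deferred to ->iotlb_sync. */
}

static void mydrv_iotlb_sync(struct iommu_domain *domain)
{
	/* Flush all recorded ranges and wait until the hardware is done. */
}

static const struct iommu_ops mydrv_iommu_ops = {
	/* ... map/unmap and the other callbacks ... */
	.flush_iotlb_all = mydrv_flush_iotlb_all,
	.iotlb_range_add = mydrv_iotlb_range_add,
	.iotlb_sync	 = mydrv_iotlb_sync,
};
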
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size);
+ size_t size);
+extern size_t iommu_unmap_fast(struct iommu_domain *domain,
+ unsigned long iova, size_t size);
extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents,
int prot);
extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags);
+static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
+{
+ if (domain->ops->flush_iotlb_all)
+ domain->ops->flush_iotlb_all(domain);
+}
+
+static inline void iommu_tlb_range_add(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ if (domain->ops->iotlb_range_add)
+ domain->ops->iotlb_range_add(domain, iova, size);
+}
+
+static inline void iommu_tlb_sync(struct iommu_domain *domain)
+{
+ if (domain->ops->iotlb_sync)
+ domain->ops->iotlb_sync(domain);
+}
+
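
These wrappers degrade to no-ops when a driver leaves the callbacks unimplemented, so generic code can call them unconditionally. A minimal sketch (map_and_flush is hypothetical) mirroring the flush added after the direct mappings above:

/* Hypothetical sketch: map a region, then make it visible on hardware
 * that may cache non-present entries. */
static int map_and_flush(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	int ret = iommu_map(domain, iova, paddr, size, prot);

	if (!ret)
		iommu_flush_tlb_all(domain);
	return ret;
}
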
static inline size_t iommu_map_sg(struct iommu_domain *domain,
unsigned long iova, struct scatterlist *sg,
unsigned int nents, int prot)
return -ENODEV;
}
+static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+ return -ENODEV;
+}
+
static inline size_t iommu_map_sg(struct iommu_domain *domain,
unsigned long iova, struct scatterlist *sg,
unsigned int nents, int prot)
return -ENODEV;
}
+static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
+{
+}
+
+static inline void iommu_tlb_range_add(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
+{
+}
+
+static inline void iommu_tlb_sync(struct iommu_domain *domain)
+{
+}
+
static inline int iommu_domain_window_enable(struct iommu_domain *domain,
u32 wnd_nr, phys_addr_t paddr,
u64 size, int prot)