struct deferred_flush_entry {
struct iova *iova;
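+ /* pages actually mapped; can be fewer than the rounded-up IOVA size */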
+ unsigned long nrpages;
struct dmar_domain *domain;
struct page *freelist;
};
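/* in flush_unmaps(), each deferred entry now carries its own page count: */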
struct deferred_flush_entry *entry =
&flush_table->entries[j];
struct iova *iova = entry->iova;
+ unsigned long nrpages = entry->nrpages;
struct dmar_domain *domain = entry->domain;
struct page *freelist = entry->freelist;
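/* caching mode (a virtualized IOMMU) makes global flushes expensive, so
* invalidate page-selectively there; on real hardware the IOTLB was already
* flushed globally and only the device-IOTLB needs per-entry invalidation */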
if (cap_caching_mode(iommu->cap))
iommu_flush_iotlb_psi(iommu, domain,
mm_to_dma_pfn(iova->pfn_lo),
- mm_to_dma_pfn(iova_size(iova)),
- !freelist, 0);
+ nrpages, !freelist, 0);
else {
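/* the device-IOTLB flush takes an order (log2 of the page count), not a count */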
- mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
+ mask = ilog2(nrpages);
iommu_flush_dev_iotlb(domain,
(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
}
spin_unlock_irqrestore(&flush_data->lock, flags);
}
-static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
+static void add_unmap(struct dmar_domain *dom, struct iova *iova,
+ unsigned long nrpages, struct page *freelist)
{
unsigned long flags;
int entry_id, iommu_id;
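/* a slot is claimed in this CPU's per-IOMMU deferred-flush table: */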
entry = &flush_data->tables[iommu_id].entries[entry_id];
entry->domain = dom;
entry->iova = iova;
+ entry->nrpages = nrpages;
entry->freelist = freelist;
if (!flush_data->timer_on) {
mod_timer(&flush_data->timer, jiffies + msecs_to_jiffies(10));
flush_data->timer_on = 1;
}
put_cpu();
}
-static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
+static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
{
struct dmar_domain *domain;
unsigned long start_pfn, last_pfn;
+ unsigned long nrpages;
struct iova *iova;
struct intel_iommu *iommu;
struct page *freelist;
if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
(unsigned long long)dev_addr))
return;
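+ /* aligned_nrpages() is an existing helper in intel-iommu.c; roughly,
+  * it rounds the page offset plus size up to whole VT-d pages:
+  *   host_addr &= ~PAGE_MASK;
+  *   return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
+  * so nrpages covers exactly what the corresponding map call mapped. */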
+ nrpages = aligned_nrpages(dev_addr, size);
start_pfn = mm_to_dma_pfn(iova->pfn_lo);
- last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
+ last_pfn = start_pfn + nrpages - 1;
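+ /* the unmap/flush range now ends at the last mapped page, not at the
+  * end of the power-of-2 IOVA allocation */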
pr_debug("Device %s unmapping: pfn %lx-%lx\n",
dev_name(dev), start_pfn, last_pfn);
if (intel_iommu_strict) {
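/* strict mode: flush synchronously and release the IOVA and
* page-table pages right away */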
iommu_flush_iotlb_psi(iommu, domain, start_pfn,
- last_pfn - start_pfn + 1, !freelist, 0);
+ nrpages, !freelist, 0);
/* free iova */
__free_iova(&domain->iovad, iova);
dma_free_pagelist(freelist);
} else {
- add_unmap(domain, iova, freelist);
+ add_unmap(domain, iova, nrpages, freelist);
/*
* queue up the release of the unmap to save the 1/6th of the
* cpu used up by the iotlb flush operation...
*/
}
}
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- intel_unmap(dev, dev_addr);
+ intel_unmap(dev, dev_addr, size);
}
static void *intel_alloc_coherent(struct device *dev, size_t size,
static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, struct dma_attrs *attrs)
{
size = PAGE_ALIGN(size);
order = get_order(size);
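/* pass the same PAGE_ALIGN'd size that was mapped at allocation time */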
- intel_unmap(dev, dma_handle);
+ intel_unmap(dev, dma_handle, size);
if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
__free_pages(page, order);
}
int nelems, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- intel_unmap(dev, sglist[0].dma_address);
+ dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
+ unsigned long nrpages = 0;
+ struct scatterlist *sg;
+ int i;
+
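+ /* elements of an sg list are mapped contiguously in IOVA space, so
+  * summing per-element page counts yields the whole mapped extent */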
+ for_each_sg(sglist, sg, nelems, i) {
+ nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
+ }
+
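+ /* startaddr is page-aligned, so intel_unmap() recomputes this same
+  * page count via aligned_nrpages() */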
+ intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
}
static int intel_nontranslate_map_sg(struct device *hddev,