dma_addr_t *dma_handle, gfp_t gfp);
void (*free_coherent)(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
- dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
- size_t size, int direction);
- void (*unmap_single)(struct device *dev, dma_addr_t addr,
- size_t size, int direction);
void (*sync_single_for_cpu)(struct device *hwdev,
dma_addr_t dma_handle, size_t size,
int direction);
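For contrast with the members being deleted, the pair that remains the canonical entry points. Their prototypes are not shown in this hunk; the following is inferred from the call sites and the per-driver definitions below (note the extra struct dma_attrs argument, which every converted call site passes as NULL):

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       struct dma_attrs *attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   struct dma_attrs *attrs);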
struct dma_mapping_ops *ops = get_dma_ops(hwdev);
BUG_ON(!valid_dma_direction(direction));
- return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
+ return ops->map_page(hwdev, virt_to_page(ptr),
+ (unsigned long)ptr & ~PAGE_MASK, size,
+ direction, NULL);
}
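The dma_map_single() conversion is mechanical: a kernel virtual address in the direct mapping splits into its struct page and intra-page offset. A minimal sketch of the equivalence (example_map_single is a hypothetical name; virt_to_page(), virt_to_phys() and PAGE_MASK are the standard helpers):

static inline dma_addr_t example_map_single(struct device *dev, void *ptr,
					    size_t size, int direction)
{
	/* Only valid for direct-mapped (lowmem) addresses, which is
	 * all that dma_map_single() ever accepted. */
	struct page *page = virt_to_page(ptr);
	unsigned long offset = (unsigned long)ptr & ~PAGE_MASK;

	/* Same bytes either way:
	 * page_to_phys(page) + offset == virt_to_phys(ptr). */
	return get_dma_ops(dev)->map_page(dev, page, offset, size,
					  direction, NULL);
}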
static inline void
struct dma_mapping_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(direction));
- if (ops->unmap_single)
- ops->unmap_single(dev, addr, size, direction);
+ if (ops->unmap_page)
+ ops->unmap_page(dev, addr, size, direction, NULL);
}
static inline dma_addr_t
struct dma_mapping_ops *ops = get_dma_ops(dev);
BUG_ON(!valid_dma_direction(direction));
- return ops->map_single(dev, page_to_phys(page) + offset,
- size, direction);
+ return ops->map_page(dev, page, offset, size, direction, NULL);
}
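dma_map_page() now hands the (page, offset) pair straight through to the implementation instead of flattening it via page_to_phys() first, so the implementation is free to derive the physical address, or bounce-buffer, as it sees fit. A hypothetical caller, for illustration only:

/* Stream one full page to the device, then release the mapping. */
static int example_send_page(struct device *dev, struct page *page)
{
	dma_addr_t dma = dma_map_page(dev, page, 0, PAGE_SIZE,
				      DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;
	/* ... kick off the transfer here ... */
	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
	return 0;
}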
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
return addr;
}
-static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
- size_t size, int dir)
-{
- return map_page(dev, pfn_to_page(paddr >> PAGE_SHIFT),
- paddr & ~PAGE_MASK, size, dir, NULL);
-}
-
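The wrapper deleted above is not AMD-specific: as the hunks below show, Calgary, GART and nommu each carried a nearly identical translation from phys_addr_t to a (page, offset) pair. Schematically, with driver_map_page standing in for each driver's real callee:

/* The boilerplate every implementation duplicated: split a physical
 * address into page frame and offset, then defer to map_page. */
static dma_addr_t boilerplate_map_single(struct device *dev,
					 phys_addr_t paddr,
					 size_t size, int dir)
{
	return driver_map_page(dev, pfn_to_page(paddr >> PAGE_SHIFT),
			       paddr & ~PAGE_MASK, size, dir, NULL);
}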
/*
- * The exported unmap_single function for dma_ops.
+ * The exported unmap_page function for dma_ops.
*/
spin_unlock_irqrestore(&domain->lock, flags);
}
-static void unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size, int dir)
-{
- return unmap_page(dev, dma_addr, size, dir, NULL);
-}
-
/*
* This is a special map_sg function which is used if we should map a
* device which is not handled by an AMD IOMMU in the system.
static struct dma_mapping_ops amd_iommu_dma_ops = {
.alloc_coherent = alloc_coherent,
.free_coherent = free_coherent,
- .map_single = map_single,
- .unmap_single = unmap_single,
.map_page = map_page,
.unmap_page = unmap_page,
.map_sg = map_sg,
return iommu_alloc(dev, tbl, vaddr, npages, dir);
}
-static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr,
- size_t size, int direction)
-{
- return calgary_map_page(dev, pfn_to_page(paddr >> PAGE_SHIFT),
- paddr & ~PAGE_MASK, size,
- direction, NULL);
-}
-
static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
iommu_free(tbl, dma_addr, npages);
}
-static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
- size_t size, int direction)
-{
- calgary_unmap_page(dev, dma_handle, size, direction, NULL);
-}
-
static void* calgary_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
static struct dma_mapping_ops calgary_dma_ops = {
.alloc_coherent = calgary_alloc_coherent,
.free_coherent = calgary_free_coherent,
- .map_single = calgary_map_single,
- .unmap_single = calgary_unmap_single,
.map_sg = calgary_map_sg,
.unmap_sg = calgary_unmap_sg,
.map_page = calgary_map_page,
return bus;
}
-static dma_addr_t gart_map_single(struct device *dev, phys_addr_t paddr,
- size_t size, int dir)
-{
- return gart_map_page(dev, pfn_to_page(paddr >> PAGE_SHIFT),
- paddr & ~PAGE_MASK, size, dir, NULL);
-}
-
/*
* Free a DMA mapping.
*/
free_iommu(iommu_page, npages);
}
-static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size, int direction)
-{
- gart_unmap_page(dev, dma_addr, size, direction, NULL);
-}
-
/*
- * Wrapper for pci_unmap_single working with scatterlists.
+ * Wrapper for gart_unmap_page working with scatterlists.
*/
for_each_sg(sg, s, nents, i) {
if (!s->dma_length || !s->length)
break;
- gart_unmap_single(dev, s->dma_address, s->dma_length, dir);
+ gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL);
}
}
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_addr)
{
- gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL);
+ gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
free_pages((unsigned long)vaddr, get_order(size));
}
}
static struct dma_mapping_ops gart_dma_ops = {
- .map_single = gart_map_single,
- .unmap_single = gart_unmap_single,
.map_sg = gart_map_sg,
.unmap_sg = gart_unmap_sg,
.map_page = gart_map_page,
return bus;
}
-static dma_addr_t nommu_map_single(struct device *hwdev, phys_addr_t paddr,
- size_t size, int direction)
-{
- return nommu_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
- paddr & ~PAGE_MASK, size, direction, NULL);
-}
-
/* Map a set of buffers described by scatterlist in streaming
* mode for DMA. This is the scatter-gather version of the
* above pci_map_single interface. Here the scatter gather list
struct dma_mapping_ops nommu_dma_ops = {
.alloc_coherent = dma_generic_alloc_coherent,
.free_coherent = nommu_free_coherent,
- .map_single = nommu_map_single,
.map_sg = nommu_map_sg,
.map_page = nommu_map_page,
.is_phys = 1,
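Note that nommu_dma_ops installs no unmap_page at all. This relies on the NULL check dma_unmap_single() applies to ops->unmap_page in the header hunk above (dma_unmap_page presumably shares the same path), so unmapping degenerates to a no-op; correct, since a 1:1 physical (is_phys) mapping holds no per-mapping state to release:

	/* Restating the header-side guard: with nommu_dma_ops,
	 * ops->unmap_page is NULL and nothing runs. */
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, direction, NULL);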
return 0;
}
-static dma_addr_t
-swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
- int direction)
-{
- return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
-}
-
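swiotlb ran the conversion in the opposite direction, since swiotlb_map_single() takes a kernel virtual address rather than a physical one. Its map_page replacement, declared just below, plausibly inverts the (page, offset) pair the same way; a sketch assuming the page sits in the direct mapping (so page_address() is valid), with the body inferred rather than quoted from the patch:

static dma_addr_t sketch_swiotlb_map_page(struct device *dev,
					  struct page *page,
					  unsigned long offset, size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	/* page_address() is only meaningful for non-highmem pages. */
	return swiotlb_map_single(dev, page_address(page) + offset,
				  size, dir);
}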
/* these will be moved to lib/swiotlb.c later on */
static dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
.mapping_error = swiotlb_dma_mapping_error,
.alloc_coherent = x86_swiotlb_alloc_coherent,
.free_coherent = swiotlb_free_coherent,
- .map_single = swiotlb_map_single_phys,
- .unmap_single = swiotlb_unmap_single,
.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
.sync_single_for_device = swiotlb_sync_single_for_device,
.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
static struct dma_mapping_ops intel_dma_ops = {
.alloc_coherent = intel_alloc_coherent,
.free_coherent = intel_free_coherent,
- .map_single = intel_map_single,
- .unmap_single = intel_unmap_single,
.map_sg = intel_map_sg,
.unmap_sg = intel_unmap_sg,
#ifdef CONFIG_X86_64