 static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir,
 		unsigned long attrs)
 {
 	dma_addr_t handle = virt_to_phys(page_address(page) + offset);
 
-	c6x_dma_sync(handle, size, dir);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		c6x_dma_sync(handle, size, dir);
+
 	return handle;
 }
 
 static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-	c6x_dma_sync(handle, size, dir);
+	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		c6x_dma_sync(handle, size, dir);
 }
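For callers, the point of DMA_ATTR_SKIP_CPU_SYNC is to avoid paying for the cache maintenance twice when the driver knows it will sync explicitly, and often only partially, later on. A minimal driver-side sketch of the single-page path, assuming a hypothetical receive flow (rx_refill, rx_complete, and len are illustrative names, not from this patch):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/mm.h>

/* Hypothetical RX path: map the whole page up front but skip the
 * implicit sync; only the bytes the device actually wrote get synced
 * once the completion reports their length. */
static int rx_refill(struct device *dev, struct page *page,
		     dma_addr_t *handle)
{
	*handle = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
				     DMA_FROM_DEVICE,
				     DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;
	return 0;
}

static void rx_complete(struct device *dev, dma_addr_t handle, size_t len)
{
	/* Sync only the portion the hardware filled, not PAGE_SIZE. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
}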
 static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist,
 		int nents, enum dma_data_direction dir, unsigned long attrs)
 {
 	struct scatterlist *sg;
 	int i;
 
 	for_each_sg(sglist, sg, nents, i) {
 		sg->dma_address = sg_phys(sg);
-		c6x_dma_sync(sg->dma_address, sg->length, dir);
+		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+			c6x_dma_sync(sg->dma_address, sg->length, dir);
 	}
 
 	return nents;
 }
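The scatter-gather path benefits the same way: a caller that intends to run one explicit sync pass over the whole list can suppress the per-entry sync at map time. A sketch under the same assumptions (xmit_map and the error value are illustrative):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* Hypothetical TX path: defer cache maintenance until the payload is
 * final, then flush the whole list in one explicit pass. Note that the
 * sync takes the original nents, not the mapped count. */
static int xmit_map(struct device *dev, struct scatterlist *sgl, int nents)
{
	int mapped;

	mapped = dma_map_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE,
				  DMA_ATTR_SKIP_CPU_SYNC);
	if (!mapped)
		return -EIO;

	/* ... CPU finishes writing the buffers ... */
	dma_sync_sg_for_device(dev, sgl, nents, DMA_TO_DEVICE);
	return mapped;
}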
 static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
 		int nents, enum dma_data_direction dir, unsigned long attrs)
 {
 	struct scatterlist *sg;
 	int i;
 
+	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
+		return;
+
 	for_each_sg(sglist, sg, nents, i)
 		c6x_dma_sync(sg_dma_address(sg), sg->length, dir);
-
 }
 
 static void c6x_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
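Teardown is symmetric: a driver that has already made the consumed bytes CPU-visible with dma_sync_single_for_cpu() can tell the unmap side not to sync the full buffer again. Another illustrative fragment (rx_unmap is hypothetical, pairing with rx_complete() above):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* The partial sync in rx_complete() already covered the bytes the CPU
 * read, so the unmap can skip the second, full-page sync. */
static void rx_unmap(struct device *dev, dma_addr_t handle)
{
	dma_unmap_page_attrs(dev, handle, PAGE_SIZE, DMA_FROM_DEVICE,
			     DMA_ATTR_SKIP_CPU_SYNC);
}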