#include <linux/types.h>
#include <linux/mm.h>
-#include <linux/string.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/export.h>	/* EXPORT_SYMBOL() */
#include <asm/io.h>
+#include <asm/cacheflush.h>	/* flush_dcache_range() */
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
[...]

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		       dma_addr_t dma_handle)
{
	free_pages((unsigned long)vaddr, get_order(size));
}
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size, enum dma_data_direction dir)
+void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
+				size_t size, enum dma_data_direction dir)
{
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		/*
+		 * Write back dirty cache lines so the device sees current
+		 * data; flush_dcache_range() takes a (start, end) pair, and
+		 * the bus address is assumed to map 1:1 onto kernel memory.
+		 */
+		flush_dcache_range(handle, handle + size);
+		break;
+	case DMA_FROM_DEVICE:
+		/* Nothing to write back; the buffer should hold no dirty lines. */
+		break;
+	default:
+		if (printk_ratelimit())
+			printk(KERN_WARNING
+			       "dma_sync_single_for_device: unsupported direction %u\n",
+			       dir);
+		break;
+	}
+}
+EXPORT_SYMBOL(dma_sync_single_for_device);
+
+dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size,
+			  enum dma_data_direction dir)
+{
+	dma_addr_t handle = virt_to_phys(addr);
+
+	/* Write back the CPU cache before handing the buffer to the device. */
+	flush_dcache_range(handle, handle + size);
+	return handle;
}
+EXPORT_SYMBOL(dma_map_single);
+
+dma_addr_t dma_map_page(struct device *dev, struct page *page,
+			unsigned long offset, size_t size,
+			enum dma_data_direction dir)
+{
+	dma_addr_t handle = page_to_phys(page) + offset;
+
+	/* Perform the cache maintenance required for this direction. */
+	dma_sync_single_for_device(dev, handle, size, dir);
+	return handle;
+}
+EXPORT_SYMBOL(dma_map_page);
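
For reference, a driver would use the streaming calls added above roughly as follows. This is a minimal illustrative sketch, not part of the patch: the example_start_tx() helper and its arguments are hypothetical, unmapping and error handling are omitted, and buf must be a directly-mapped kernel buffer (e.g. from kmalloc()) for the virt_to_phys()-based dma_map_single() above to be valid.

/* Hypothetical driver-side usage of the streaming DMA API above. */
#include <linux/device.h>
#include <linux/dma-mapping.h>

static void example_start_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* Map for a CPU-to-device transfer; on this port the mapping
	 * writes back the CPU cache so the device reads current data. */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* ... program the device with 'handle' and start the transfer ... */

	/* If the CPU rewrites the buffer and the mapping is reused for
	 * another transfer, sync it for the device again first. */
	dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);
}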