extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
		 size_t size, int direction);
+extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr);
#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+extern int dma_mmap_coherent(struct device *, struct vm_area_struct *,
+			     void *, dma_addr_t, size_t);
+#define ARCH_HAS_DMA_MMAP_COHERENT
+
+
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	__dma_sync(vaddr, size, (int)direction);
}

--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
	return 0;
}
fs_initcall(dma_init);
+
+int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+		      void *cpu_addr, dma_addr_t handle, size_t size)
+{
+	unsigned long pfn;
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
+#else
+	pfn = page_to_pfn(virt_to_page(cpu_addr));
+#endif
+	return remap_pfn_range(vma, vma->vm_start,
+			       pfn + vma->vm_pgoff,
+			       vma->vm_end - vma->vm_start,
+			       vma->vm_page_prot);
+}
+EXPORT_SYMBOL_GPL(dma_mmap_coherent);
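
As a usage illustration only, not part of the patch itself: a driver that has obtained a buffer from dma_alloc_coherent() could hand it to user space from its mmap file operation roughly as sketched below. All foo_* names are hypothetical.

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>

/* Hypothetical driver state, filled in at probe time. */
static struct device *foo_dev;	/* device the buffer was allocated for */
static void *foo_buf;		/* CPU address from dma_alloc_coherent() */
static dma_addr_t foo_handle;	/* bus address from dma_alloc_coherent() */
static size_t foo_size;		/* size passed to dma_alloc_coherent() */

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Let the arch helper pick the PFN and page protection. */
	return dma_mmap_coherent(foo_dev, vma, foo_buf, foo_handle, foo_size);
}

static const struct file_operations foo_fops = {
	.owner	= THIS_MODULE,
	.mmap	= foo_mmap,
};

On cache-coherent systems the same call simply takes the virt_to_page() path above, so the driver does not need to distinguish the two cases.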
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
#endif
}
EXPORT_SYMBOL(__dma_sync_page);
+
+/*
+ * Return the PFN for a given cpu virtual address returned by
+ * __dma_alloc_coherent. This is used by dma_mmap_coherent()
+ */
+unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr)
+{
+	/* This should always be populated, so we don't test every
+	 * level. If that fails, we'll have a nice crash which
+	 * will be as good as a BUG_ON()
+	 */
+	pgd_t *pgd = pgd_offset_k(cpu_addr);
+	pud_t *pud = pud_offset(pgd, cpu_addr);
+	pmd_t *pmd = pmd_offset(pud, cpu_addr);
+	pte_t *ptep = pte_offset_kernel(pmd, cpu_addr);
+
+	if (pte_none(*ptep) || !pte_present(*ptep))
+		return 0;
+	return pte_pfn(*ptep);
+}
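
Again purely as an illustration, not part of the patch: once a driver wires dma_mmap_coherent() into its mmap handler as sketched earlier, user space reaches the coherent buffer with an ordinary mmap() on the device node. The /dev/foo path and FOO_BUF_SIZE constant are made-up names.

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

#define FOO_BUF_SIZE 4096	/* must not exceed the buffer the driver allocated */

int main(void)
{
	int fd = open("/dev/foo", O_RDWR);	/* hypothetical device node */
	void *buf;

	if (fd < 0)
		return 1;

	/* The driver's mmap handler ends up in dma_mmap_coherent(), which
	 * remaps the coherent buffer's PFN into this VMA. */
	buf = mmap(NULL, FOO_BUF_SIZE, PROT_READ | PROT_WRITE,
		   MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED) {
		close(fd);
		return 1;
	}

	/* buf now aliases the DMA buffer; on non-coherent PowerPC it is
	 * mapped non-cached, matching the kernel-side mapping. */
	munmap(buf, FOO_BUF_SIZE);
	close(fd);
	return 0;
}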