memcpy(ptr, buf->safe, size);
/*
- * DMA buffers must have the same cache properties
- * as if they were really used for DMA - which means
- * data must be written back to RAM. Note that
- * we don't use dmac_flush_range() here for the
- * bidirectional case because we know the cache
- * lines will be coherent with the data written.
+ * Since we may have written to a page cache page,
+ * we need to ensure that the data will be coherent
+ * with user mappings.
*/
- __cpuc_flush_kernel_dcache_area(ptr, size);
- dmac_clean_range(ptr, ptr + size);
- outer_clean_range(__pa(ptr), __pa(ptr) + size);
+ __cpuc_flush_dcache_area(ptr, size);
}
free_safe_buffer(dev->archdata.dmabounce, buf);
+ } else {
+ __dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
}
}
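
The copy-back plus flush above is what keeps the streaming DMA contract intact for bounced buffers: the CPU may only look at a DMA_FROM_DEVICE buffer once it has been unmapped (or synced) back to the CPU, at which point dmabounce copies the safe buffer back and makes the kernel mapping coherent. A minimal sketch of that contract from a driver's point of view, assuming hypothetical dev/rx_buf/len names that are not part of this patch:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Sketch only: illustrates the ownership rules the unmap path above
 * has to honour; 'dev', 'rx_buf' and 'len' are placeholders. */
static int rx_one_buffer(struct device *dev, void *rx_buf, size_t len)
{
	dma_addr_t handle;

	/* CPU -> device: the CPU must not touch rx_buf after this. */
	handle = dma_map_single(dev, rx_buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... hand 'handle' to the hardware and wait for the DMA ... */

	/* device -> CPU: for a bounced buffer this is where dmabounce
	 * copies the safe buffer back and flushes the kernel mapping. */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);

	return 0;	/* rx_buf is now coherent for the CPU */
}
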
void (*coherent_kern_range)(unsigned long, unsigned long);
void (*coherent_user_range)(unsigned long, unsigned long);
- void (*flush_kern_dcache_page)(void *);
+ void (*flush_kern_dcache_area)(void *, size_t);

- void (*dma_inv_range)(const void *, const void *);
- void (*dma_clean_range)(const void *, const void *);
+ void (*dma_map_area)(const void *, size_t, int);
+ void (*dma_unmap_area)(const void *, size_t, int);
+
void (*dma_flush_range)(const void *, const void *);
};
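
The replacement hooks take a (start, size, direction) triple instead of a (start, end) range, so the direction-based choice between invalidate, clean and flush moves out of the generic dma-mapping code and into each CPU's map/unmap implementation. As a reading aid, a rough C model of that dispatch follows; the real implementations (such as xsc3_dma_map_area further down) are assembly, and inv_range/clean_range/flush_range are stand-in names, not real kernel symbols:

#include <linux/dma-mapping.h>	/* DMA_{TO,FROM}_DEVICE, DMA_BIDIRECTIONAL */

/* Stand-ins for the CPU-specific range operations (assumed names). */
extern void inv_range(const void *start, const void *end);
extern void clean_range(const void *start, const void *end);
extern void flush_range(const void *start, const void *end);

/* Rough C model of a dma_map_area() implementation: pick the cache
 * operation from the transfer direction before the device sees the
 * buffer. */
static void example_dma_map_area(const void *start, size_t size, int dir)
{
	const void *end = start + size;

	switch (dir) {
	case DMA_FROM_DEVICE:		/* device writes: discard stale lines */
		inv_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* CPU wrote: write back to RAM */
		clean_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:		/* both: write back and invalidate */
		flush_range(start, end);
		break;
	}
}

/* dma_unmap_area() gives CPUs that prefetch speculatively a hook to
 * invalidate again after the transfer; simpler cores can leave it empty. */
static void example_dma_unmap_area(const void *start, size_t size, int dir)
{
	if (dir != DMA_TO_DEVICE)
		inv_range(start, start + size);
}
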
}
}
- static inline void
- vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
- 			 unsigned long uaddr, void *kaddr,
- 			 unsigned long len, int write)
- {
- 	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
- 		unsigned long addr = (unsigned long)kaddr;
- 		__cpuc_coherent_kern_range(addr, addr + len);
- 	}
- }
-
+ #ifndef CONFIG_CPU_CACHE_VIPT
+ #define flush_cache_mm(mm) \
+ vivt_flush_cache_mm(mm)
+ #define flush_cache_range(vma,start,end) \
+ vivt_flush_cache_range(vma,start,end)
+ #define flush_cache_page(vma,addr,pfn) \
+ vivt_flush_cache_page(vma,addr,pfn)
- #define flush_ptrace_access(vma,page,ua,ka,len,write) \
- 	vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
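
With the VIVT inline gone, flush_ptrace_access() becomes an out-of-line function in both configurations; its main consumer is the copy_to_user_page() path used when ptrace writes into another task's pages. A hedged sketch of that pattern (the function name and the 'src' parameter are illustrative, not taken from this patch):

#include <linux/mm.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

/* Illustrative only: roughly the copy_to_user_page() pattern that
 * relies on flush_ptrace_access().  After writing through the kernel
 * mapping (kaddr), the caches must be made coherent with the user
 * mapping at uaddr, e.g. so a ptraced task sees an inserted breakpoint. */
static void poke_remote_page(struct vm_area_struct *vma, struct page *page,
			     unsigned long uaddr, void *kaddr,
			     const void *src, unsigned long len)
{
	memcpy(kaddr, src, len);
	flush_ptrace_access(vma, page, uaddr, kaddr, len, 1 /* write */);
}
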
kfrom = kmap_atomic(from, KM_USER0);
kto = kmap_atomic(to, KM_USER1);
copy_page(kto, kfrom);
- __cpuc_flush_dcache_page(kto);
+ #ifdef CONFIG_HIGHMEM
+ /*
+ * kmap_atomic() doesn't set the page virtual address, and
+ * kunmap_atomic() takes care of cache flushing already.
+ */
+ if (page_address(to) != NULL)
+ #endif
+ __cpuc_flush_dcache_area(kto, PAGE_SIZE);
kunmap_atomic(kto, KM_USER1);
kunmap_atomic(kfrom, KM_USER0);
}
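
Since the hunk only shows the body, here is roughly how the whole memcpy-based copy_user_highpage() variant reads after the change; the surrounding declaration is assumed from the usual copy_user_highpage() signature and is not shown in this fragment. On a highmem kernel, kmap_atomic() hands out a fixmap address with no page_address() binding and kunmap_atomic() already flushes it, so the explicit flush is only needed when the destination is a lowmem page.

#include <linux/highmem.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/page.h>

/* Approximate post-patch shape of the function patched above; the
 * declaration is assumed, the body comes from the hunk. */
static void example_copy_user_highpage(struct page *to, struct page *from,
				       unsigned long vaddr,
				       struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kfrom = kmap_atomic(from, KM_USER0);
	kto = kmap_atomic(to, KM_USER1);
	copy_page(kto, kfrom);
#ifdef CONFIG_HIGHMEM
	/*
	 * kmap_atomic() doesn't set the page virtual address, and
	 * kunmap_atomic() takes care of cache flushing already.
	 */
	if (page_address(to) != NULL)
#endif
		__cpuc_flush_dcache_area(kto, PAGE_SIZE);
	kunmap_atomic(kto, KM_USER1);
	kunmap_atomic(kfrom, KM_USER0);
}
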
.long xsc3_flush_user_cache_range
.long xsc3_coherent_kern_range
.long xsc3_coherent_user_range
- .long xsc3_flush_kern_dcache_page
+ .long xsc3_flush_kern_dcache_area
- .long xsc3_dma_inv_range
- .long xsc3_dma_clean_range
+ .long xsc3_dma_map_area
+ .long xsc3_dma_unmap_area
.long xsc3_dma_flush_range
ENTRY(cpu_xsc3_dcache_clean_area)
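
The .long table above is the xsc3 instance of struct cpu_cache_fns, so its entry order has to track the field order in the header change earlier in this series. As a reading aid only, the same table written as a C designated initializer; the real table stays in proc-xsc3.S, and the prototypes below are assumed from the field types shown in the struct hunk:

#include <asm/cacheflush.h>	/* struct cpu_cache_fns (header assumed) */

/* Assumed prototypes, matching the field types shown above. */
extern void xsc3_coherent_kern_range(unsigned long, unsigned long);
extern void xsc3_coherent_user_range(unsigned long, unsigned long);
extern void xsc3_flush_kern_dcache_area(void *, size_t);
extern void xsc3_dma_map_area(const void *, size_t, int);
extern void xsc3_dma_unmap_area(const void *, size_t, int);
extern void xsc3_dma_flush_range(const void *, const void *);

/* Illustrative C view of the assembly table: each .long must line up
 * field-for-field with struct cpu_cache_fns (fields not shown in this
 * fragment are omitted). */
static const struct cpu_cache_fns xsc3_cache_fns_view = {
	.coherent_kern_range	= xsc3_coherent_kern_range,
	.coherent_user_range	= xsc3_coherent_user_range,
	.flush_kern_dcache_area	= xsc3_flush_kern_dcache_area,
	.dma_map_area		= xsc3_dma_map_area,
	.dma_unmap_area		= xsc3_dma_unmap_area,
	.dma_flush_range	= xsc3_dma_flush_range,
};
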