@@ ... @@ void copy_user_highpage(struct page *to, struct page *from,
 	void *vfrom, *vto;

 	vto = kmap_atomic(to, KM_USER1);
-	if (cpu_has_dc_aliases) {
+	if (cpu_has_dc_aliases && page_mapped(from)) {
 		vfrom = kmap_coherent(from, vaddr);
 		copy_page(vto, vfrom);
 		kunmap_coherent();
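For readers who have not worked with the MIPS virtually indexed D-cache, the reason kmap_coherent() and page_mapped() matter in the hunk above is cache color: two virtual mappings of the same physical page can index different cache sets whenever they differ in the index bits that sit above the page offset, so data written through the ordinary kernel mapping need not be visible through the user mapping. The stand-alone sketch below models that check; the 32 KB, 4-way cache figures are only an example, and alias_mask / pages_do_alias_model() merely approximate the kernel's shm_align_mask and pages_do_alias() arithmetic rather than reproduce them.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL		/* example page size              */
#define DCACHE_SIZE	(32 * 1024UL)	/* example: 32 KB D-cache         */
#define DCACHE_WAYS	4UL		/* example: 4-way set associative */

/* Virtual-address bits that index the cache but lie above the page
 * offset.  If two mappings of one physical page differ in these bits,
 * the same data occupies two different cache sets: a D-cache alias. */
static const uintptr_t alias_mask =
	((DCACHE_SIZE / DCACHE_WAYS) - 1) & ~(PAGE_SIZE - 1);

static bool pages_do_alias_model(uintptr_t va1, uintptr_t va2)
{
	return ((va1 ^ va2) & alias_mask) != 0;
}

int main(void)
{
	uintptr_t user_va   = 0x00401000;	/* hypothetical user mapping   */
	uintptr_t kernel_va = 0x80002000;	/* hypothetical kernel mapping */

	printf("alias mask: 0x%lx\n", (unsigned long)alias_mask);
	printf("the two mappings %s\n",
	       pages_do_alias_model(user_va, kernel_va) ?
	       "alias (different cache color)" : "share a cache color");
	return 0;
}

kmap_coherent() sidesteps the problem by mapping the page at a kernel virtual address of the same color as vaddr, which only buys anything when the page really is mapped into user space, hence the page_mapped() check added above.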
@@ ... @@ void copy_to_user_page(struct vm_area_struct *vma,
 	struct page *page, unsigned long vaddr, void *dst, const void *src,
 	unsigned long len)
 {
-	if (cpu_has_dc_aliases) {
+	if (cpu_has_dc_aliases && page_mapped(page)) {
 		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(vto, src, len);
 		kunmap_coherent();
-	} else
+	} else {
 		memcpy(dst, src, len);
+		if (cpu_has_dc_aliases)
+			SetPageDcacheDirty(page);
+	}
 	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
 		flush_cache_page(vma, vaddr, page_to_pfn(page));
 }
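The new else branch above is the other half of the scheme: when the page has no user mapping, the write simply goes through the kernel mapping (dst) and the page is flagged with SetPageDcacheDirty(); flushing the now-dirty kernel-color cache lines is deferred until the page is faulted into a user address space, where the fault-time __update_cache() path consumes the flag. A rough user-space model of that bookkeeping follows; struct toy_page and the helper names are inventions for illustration, not kernel interfaces.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Toy page descriptor carrying just the state the patch manipulates. */
struct toy_page {
	char data[64];
	bool dcache_dirty;		/* models PG_dcache_dirty */
};

static void flush_dcache_page_model(struct toy_page *p)
{
	/* Stand-in for writing back/invalidating the kernel-color lines. */
	printf("flushing kernel-color lines of page %p\n", (void *)p);
	p->dcache_dirty = false;
}

/* Models the patched else branch of copy_to_user_page(): write through
 * the kernel mapping and remember that its cache lines are now dirty. */
static void write_via_kernel_mapping(struct toy_page *p,
				     const char *src, size_t len)
{
	memcpy(p->data, src, len);
	p->dcache_dirty = true;		/* SetPageDcacheDirty(page) */
}

/* Models the later __update_cache()-style step: when the page gains a
 * user mapping, flush first if the kernel mapping left dirty lines. */
static void map_into_user_space(struct toy_page *p)
{
	if (p->dcache_dirty)
		flush_dcache_page_model(p);
}

int main(void)
{
	struct toy_page page = { .dcache_dirty = false };

	write_via_kernel_mapping(&page, "breakpoint", 10);
	map_into_user_space(&page);	/* deferred flush happens here */
	return 0;
}

Without the flag, the data written through the kernel mapping could stay in cache lines of the kernel color while a process later reads the page through a differently colored user mapping and sees stale memory.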
@@ ... @@ void copy_from_user_page(struct vm_area_struct *vma,
 	struct page *page, unsigned long vaddr, void *dst, const void *src,
 	unsigned long len)
 {
-	if (cpu_has_dc_aliases) {
-		void *vfrom =
-			kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+	if (cpu_has_dc_aliases && page_mapped(page)) {
+		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(dst, vfrom, len);
 		kunmap_coherent();
-	} else
+	} else {
 		memcpy(dst, src, len);
+		if (cpu_has_dc_aliases)
+			SetPageDcacheDirty(page);
+	}
 }

 EXPORT_SYMBOL(copy_from_user_page);
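Neither hunk shows the call path, but the usual way user space reaches these two helpers is ptrace(): access_process_vm() uses copy_to_user_page() for remote writes and copy_from_user_page() for remote reads, so a debugger poking a breakpoint into a running, multithreaded target is the classic workload for this aliasing code. The example below is generic Linux ptrace(2) usage with error handling omitted; nothing in it is MIPS-specific, and the value being poked is arbitrary.

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static long target = 0x1111;	/* word the tracer will overwrite */

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		/* Tracee: let the parent trace us, then stop. */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		printf("tracee sees target = 0x%lx\n", target);
		return 0;
	}

	waitpid(child, NULL, 0);	/* wait for the SIGSTOP */

	/* Remote write: in the kernel this goes through
	 * access_process_vm() and copy_to_user_page(). */
	ptrace(PTRACE_POKEDATA, child, &target, (void *)0x2222L);

	ptrace(PTRACE_CONT, child, NULL, NULL);
	waitpid(child, NULL, 0);
	return 0;
}

Because the poke lands while the child still shares the parent's data pages copy-on-write, the write should also exercise copy_user_highpage() via the COW break; the child then prints the patched value 0x2222.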