sh: Assume new page cache pages have dirty dcache lines.
author	Paul Mundt <lethal@linux-sh.org>
Wed, 1 Dec 2010 06:39:51 +0000 (15:39 +0900)
committer	Paul Mundt <lethal@linux-sh.org>
Wed, 1 Dec 2010 06:39:51 +0000 (15:39 +0900)
This follows the ARM change c01778001a4f5ad9c62d882776235f3f31922fdd
("ARM: 6379/1: Assume new page cache pages have dirty D-cache") for the
same rationale:

    There are places in Linux where writes to newly allocated page
    cache pages happen without a subsequent call to flush_dcache_page()
    (several PIO drivers including USB HCD). This patch changes the
    meaning of PG_arch_1 to be PG_dcache_clean and always flush the
    D-cache for a newly mapped page in update_mmu_cache().
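
To make the inverted polarity concrete, here is a minimal sketch
(illustrative only, not part of the patch; the real change is in
__update_cache() below) of how a freshly allocated page, where
PG_arch_1 starts out clear, is treated on its first fault:

	/* old: a fresh page (bit clear) is presumed clean, nothing is flushed */
	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
		__flush_purge_region(page_address(page), PAGE_SIZE);

	/* new: a fresh page is presumed dirty and flushed exactly once;
	 * the bit then stays set until flush_dcache_page() clears it
	 * again for an unmapped page. */
	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_purge_region(page_address(page), PAGE_SIZE);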

This addresses issues seen when executing binaries from MMC, as well
as with other HCDs that don't explicitly do cache management for
their pipe-in buffers.
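
As an example of the failure mode, consider a hypothetical PIO fill
path of the kind described above (the function and names are
illustrative, not taken from any real driver): the controller FIFO is
drained into a brand new page cache page through its kernel mapping,
dirtying D-cache lines, with no flush_dcache_page() afterwards.

	static void pio_fill_page(struct page *page, void __iomem *fifo)
	{
		u32 *buf = page_address(page);	/* kernel alias of the page */
		int i;

		for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
			buf[i] = ioread32(fifo);	/* dirties D-cache lines */

		/*
		 * No flush_dcache_page(page) here.  Under the old scheme
		 * nothing ever wrote these lines back, so executing from
		 * this page could fetch stale data.  With PG_dcache_clean,
		 * update_mmu_cache() flushes the page when it is first
		 * mapped into userspace.
		 */
	}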

Requested-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
arch/sh/include/asm/cacheflush.h
arch/sh/mm/cache-sh4.c
arch/sh/mm/cache-sh7705.c
arch/sh/mm/cache.c
arch/sh/mm/kmap.c

diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index 1f4e562c5e8cc1317bd97a448e5067465bad26a9..82e1eabeac989fff1b4c2fc43aa74bcd2b23d715 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -96,7 +96,7 @@ void kmap_coherent_init(void);
 void *kmap_coherent(struct page *page, unsigned long addr);
 void kunmap_coherent(void *kvaddr);
 
-#define PG_dcache_dirty        PG_arch_1
+#define PG_dcache_clean        PG_arch_1
 
 void cpu_cache_init(void);
 
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 2cfae81914aaac53289df444123cfb9f5f99476d..92eb98633ab05655fcdc3c4073f071798302e632 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -114,7 +114,7 @@ static void sh4_flush_dcache_page(void *arg)
        struct address_space *mapping = page_mapping(page);
 
        if (mapping && !mapping_mapped(mapping))
-               set_bit(PG_dcache_dirty, &page->flags);
+               clear_bit(PG_dcache_clean, &page->flags);
        else
 #endif
                flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
@@ -239,7 +239,7 @@ static void sh4_flush_cache_page(void *args)
                 * another ASID than the current one.
                 */
                map_coherent = (current_cpu_data.dcache.n_aliases &&
-                       !test_bit(PG_dcache_dirty, &page->flags) &&
+                       test_bit(PG_dcache_clean, &page->flags) &&
                        page_mapped(page));
                if (map_coherent)
                        vaddr = kmap_coherent(page, address);
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index f498da1cce7ac5a070e3c563f94d04a1b812f17f..7729cca727ebebb48dfd787d0dd9a601dc9d1be3 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -139,7 +139,7 @@ static void sh7705_flush_dcache_page(void *arg)
        struct address_space *mapping = page_mapping(page);
 
        if (mapping && !mapping_mapped(mapping))
-               set_bit(PG_dcache_dirty, &page->flags);
+               clear_bit(PG_dcache_clean, &page->flags);
        else
                __flush_dcache_page(__pa(page_address(page)));
 }
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index ba401d137bb9f5057b93f18afdd75da87209525e..88d3dc3d30d50aabcbdfcff04ac197cf19470bb7 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -60,14 +60,14 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long len)
 {
        if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
-           !test_bit(PG_dcache_dirty, &page->flags)) {
+           test_bit(PG_dcache_clean, &page->flags)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent(vto);
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
-                       set_bit(PG_dcache_dirty, &page->flags);
+                       clear_bit(PG_dcache_clean, &page->flags);
        }
 
        if (vma->vm_flags & VM_EXEC)
@@ -79,14 +79,14 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long len)
 {
        if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
-           !test_bit(PG_dcache_dirty, &page->flags)) {
+           test_bit(PG_dcache_clean, &page->flags)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent(vfrom);
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
-                       set_bit(PG_dcache_dirty, &page->flags);
+                       clear_bit(PG_dcache_clean, &page->flags);
        }
 }
 
@@ -98,7 +98,7 @@ void copy_user_highpage(struct page *to, struct page *from,
        vto = kmap_atomic(to, KM_USER1);
 
        if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
-           !test_bit(PG_dcache_dirty, &from->flags)) {
+           test_bit(PG_dcache_clean, &from->flags)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent(vfrom);
@@ -141,7 +141,7 @@ void __update_cache(struct vm_area_struct *vma,
 
        page = pfn_to_page(pfn);
        if (pfn_valid(pfn)) {
-               int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
+               int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
                if (dirty)
                        __flush_purge_region(page_address(page), PAGE_SIZE);
        }
@@ -153,7 +153,7 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
 
        if (pages_do_alias(addr, vmaddr)) {
                if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
-                   !test_bit(PG_dcache_dirty, &page->flags)) {
+                   test_bit(PG_dcache_clean, &page->flags)) {
                        void *kaddr;
 
                        kaddr = kmap_coherent(page, vmaddr);
diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c
index 15d74ea4209497b8d938840a7fbf6b7cfce19666..ec29e14ec5a85643b600952b3140f013ada5d258 100644
--- a/arch/sh/mm/kmap.c
+++ b/arch/sh/mm/kmap.c
@@ -34,7 +34,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
        enum fixed_addresses idx;
        unsigned long vaddr;
 
-       BUG_ON(test_bit(PG_dcache_dirty, &page->flags));
+       BUG_ON(!test_bit(PG_dcache_clean, &page->flags));
 
        pagefault_disable();