sh: Use the now generic SH-4 clear/copy page ops for all MMU platforms.
author: Paul Mundt <lethal@linux-sh.org>
Mon, 27 Jul 2009 12:30:17 +0000 (21:30 +0900)
committer: Paul Mundt <lethal@linux-sh.org>
Mon, 27 Jul 2009 12:30:17 +0000 (21:30 +0900)
Now that the SH-4 page clear/copy ops are generic, they can be used for
all platforms with CONFIG_MMU=y. SH-5 remains the odd one out, but it too
will gradually be converted over to using this interface.

SH-3 platforms which do not contain aliases will see no impact from this
change, while aliasing SH-3 platforms will get the same interface as
SH-4.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
arch/sh/include/asm/cacheflush.h
arch/sh/include/asm/page.h
arch/sh/include/asm/pgtable.h
arch/sh/mm/Makefile_32
arch/sh/mm/cache-sh5.c
arch/sh/mm/pg-mmu.c [new file with mode: 0644]
arch/sh/mm/pg-sh4.c [deleted file]

index 4e360114269d8947bf3d1b5994ef8bc4137826f6..4c85d55847cce12704a003d662149b51a326a239 100644 (file)
@@ -49,7 +49,6 @@ static inline void flush_kernel_dcache_page(struct page *page)
        flush_dcache_page(page);
 }
 
-#if (defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)) && !defined(CONFIG_CACHE_OFF)
 extern void copy_to_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len);
@@ -57,20 +56,6 @@ extern void copy_to_user_page(struct vm_area_struct *vma,
 extern void copy_from_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len);
-#else
-#define copy_to_user_page(vma, page, vaddr, dst, src, len)     \
-       do {                                                    \
-               flush_cache_page(vma, vaddr, page_to_pfn(page));\
-               memcpy(dst, src, len);                          \
-               flush_icache_user_range(vma, page, vaddr, len); \
-       } while (0)
-
-#define copy_from_user_page(vma, page, vaddr, dst, src, len)   \
-       do {                                                    \
-               flush_cache_page(vma, vaddr, page_to_pfn(page));\
-               memcpy(dst, src, len);                          \
-       } while (0)
-#endif
 
 #define flush_cache_vmap(start, end)           flush_cache_all()
 #define flush_cache_vunmap(start, end)         flush_cache_all()
index 5208b7bfc24e820c8c30667776bfc97810c3a2d5..847eeabb9083fec300504cc7d14c0bbc0b1553a7 100644 (file)
@@ -63,22 +63,23 @@ extern void copy_page(void *to, void *from);
 struct page;
 struct vm_area_struct;
 
-#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \
-       (defined(CONFIG_CPU_SH5) || defined(CONFIG_CPU_SH4) || \
-        defined(CONFIG_SH7705_CACHE_32KB))
+#if defined(CONFIG_CPU_SH5)
 extern void clear_user_page(void *to, unsigned long address, struct page *page);
 extern void copy_user_page(void *to, void *from, unsigned long address,
                           struct page *page);
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
+
+#elif defined(CONFIG_MMU)
 extern void copy_user_highpage(struct page *to, struct page *from,
                               unsigned long vaddr, struct vm_area_struct *vma);
 #define __HAVE_ARCH_COPY_USER_HIGHPAGE
 extern void clear_user_highpage(struct page *page, unsigned long vaddr);
 #define clear_user_highpage    clear_user_highpage
-#endif
+
 #else
+
 #define clear_user_page(page, vaddr, pg)       clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)    copy_page(to, from)
+
 #endif
 
 /*
index bef3ab7fc09ed27e562535a78c31e2e64a711e86..ba2333216c5b6fa75255de71b1ae2748dbd2a193 100644 (file)
@@ -141,8 +141,7 @@ extern void paging_init(void);
 extern void page_table_range_init(unsigned long start, unsigned long end,
                                  pgd_t *pgd);
 
-#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \
-    defined(CONFIG_SH7705_CACHE_32KB)) && defined(CONFIG_MMU)
+#if defined(CONFIG_MMU) && !defined(CONFIG_CPU_SH5)
 extern void kmap_coherent_init(void);
 #else
 #define kmap_coherent_init()   do { } while (0)
index 5c04bbb08d36806b87010619fa42bc717f7a3886..62e280734dcb74d92760aeea123e02121d870c23 100644 (file)
@@ -15,7 +15,7 @@ endif
 obj-y                  += $(cache-y)
 
 mmu-y                  := tlb-nommu.o pg-nommu.o
-mmu-$(CONFIG_MMU)      := fault_32.o tlbflush_32.o ioremap_32.o
+mmu-$(CONFIG_MMU)      := fault_32.o tlbflush_32.o ioremap_32.o pg-mmu.o
 
 obj-y                  += $(mmu-y)
 obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o
@@ -29,10 +29,6 @@ tlb-$(CONFIG_CPU_SH3)                := tlb-sh3.o
 tlb-$(CONFIG_CPU_SH4)          := tlb-sh4.o
 tlb-$(CONFIG_CPU_HAS_PTEAEX)   := tlb-pteaex.o
 obj-y                          += $(tlb-y)
-ifndef CONFIG_CACHE_OFF
-obj-$(CONFIG_CPU_SH4)          += pg-sh4.o
-obj-$(CONFIG_SH7705_CACHE_32KB)        += pg-sh4.o
-endif
 endif
 
 obj-$(CONFIG_HUGETLB_PAGE)     += hugetlbpage.o
index 86762092508c4d12535cae098a54c693c212baeb..3e2d7321b636aaa74dad3624efaa27a475a79cc2 100644 (file)
@@ -831,4 +831,21 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
        else
                sh64_clear_user_page_coloured(to, address);
 }
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+                      unsigned long vaddr, void *dst, const void *src,
+                      unsigned long len)
+{
+       flush_cache_page(vma, vaddr, page_to_pfn(page));
+       memcpy(dst, src, len);
+       flush_icache_user_range(vma, page, vaddr, len);
+}
+
+void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+                        unsigned long vaddr, void *dst, const void *src,
+                        unsigned long len)
+{
+       flush_cache_page(vma, vaddr, page_to_pfn(page));
+       memcpy(dst, src, len);
+}
 #endif
diff --git a/arch/sh/mm/pg-mmu.c b/arch/sh/mm/pg-mmu.c
new file mode 100644 (file)
index 0000000..356d2cd
--- /dev/null
@@ -0,0 +1,136 @@
+/*
+ * arch/sh/mm/pg-mmu.c
+ *
+ * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
+ * Copyright (C) 2002 - 2009  Paul Mundt
+ *
+ * Released under the terms of the GNU GPL v2.0.
+ */
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/fs.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+
+#define kmap_get_fixmap_pte(vaddr)                                     \
+       pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
+
+static pte_t *kmap_coherent_pte;
+
+void __init kmap_coherent_init(void)
+{
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
+       unsigned long vaddr;
+
+       /* cache the first coherent kmap pte */
+       vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
+       kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
+#endif
+}
+
+static inline void *kmap_coherent(struct page *page, unsigned long addr)
+{
+       enum fixed_addresses idx;
+       unsigned long vaddr, flags;
+       pte_t pte;
+
+       inc_preempt_count();
+
+       idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT;
+       vaddr = __fix_to_virt(FIX_CMAP_END - idx);
+       pte = mk_pte(page, PAGE_KERNEL);
+
+       local_irq_save(flags);
+       flush_tlb_one(get_asid(), vaddr);
+       local_irq_restore(flags);
+
+       update_mmu_cache(NULL, vaddr, pte);
+
+       set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
+
+       return (void *)vaddr;
+}
+
+static inline void kunmap_coherent(struct page *page)
+{
+       dec_preempt_count();
+       preempt_check_resched();
+}
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+                      unsigned long vaddr, void *dst, const void *src,
+                      unsigned long len)
+{
+       if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
+           !test_bit(PG_dcache_dirty, &page->flags)) {
+               void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+               memcpy(vto, src, len);
+               kunmap_coherent(vto);
+       } else {
+               memcpy(dst, src, len);
+               if (boot_cpu_data.dcache.n_aliases)
+                       set_bit(PG_dcache_dirty, &page->flags);
+       }
+
+       if (vma->vm_flags & VM_EXEC)
+               flush_cache_page(vma, vaddr, page_to_pfn(page));
+}
+
+void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+                        unsigned long vaddr, void *dst, const void *src,
+                        unsigned long len)
+{
+       if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
+           !test_bit(PG_dcache_dirty, &page->flags)) {
+               void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+               memcpy(dst, vfrom, len);
+               kunmap_coherent(vfrom);
+       } else {
+               memcpy(dst, src, len);
+               if (boot_cpu_data.dcache.n_aliases)
+                       set_bit(PG_dcache_dirty, &page->flags);
+       }
+}
+
+void copy_user_highpage(struct page *to, struct page *from,
+                       unsigned long vaddr, struct vm_area_struct *vma)
+{
+       void *vfrom, *vto;
+
+       vto = kmap_atomic(to, KM_USER1);
+
+       if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
+           !test_bit(PG_dcache_dirty, &from->flags)) {
+               vfrom = kmap_coherent(from, vaddr);
+               copy_page(vto, vfrom);
+               kunmap_coherent(vfrom);
+       } else {
+               vfrom = kmap_atomic(from, KM_USER0);
+               copy_page(vto, vfrom);
+               kunmap_atomic(vfrom, KM_USER0);
+       }
+
+       if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
+               __flush_wback_region(vto, PAGE_SIZE);
+
+       kunmap_atomic(vto, KM_USER1);
+       /* Make sure this page is cleared on other CPU's too before using it */
+       smp_wmb();
+}
+EXPORT_SYMBOL(copy_user_highpage);
+
+void clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+       void *kaddr = kmap_atomic(page, KM_USER0);
+
+       clear_page(kaddr);
+
+       if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
+               __flush_wback_region(kaddr, PAGE_SIZE);
+
+       kunmap_atomic(kaddr, KM_USER0);
+}
+EXPORT_SYMBOL(clear_user_highpage);
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
deleted file mode 100644 (file)
index 4d93070..0000000
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * arch/sh/mm/pg-sh4.c
- *
- * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
- * Copyright (C) 2002 - 2009  Paul Mundt
- *
- * Released under the terms of the GNU GPL v2.0.
- */
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/mutex.h>
-#include <linux/fs.h>
-#include <linux/highmem.h>
-#include <linux/module.h>
-#include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
-
-#define kmap_get_fixmap_pte(vaddr)                                     \
-       pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
-
-static pte_t *kmap_coherent_pte;
-
-void __init kmap_coherent_init(void)
-{
-       unsigned long vaddr;
-
-       /* cache the first coherent kmap pte */
-       vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
-       kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
-}
-
-static inline void *kmap_coherent(struct page *page, unsigned long addr)
-{
-       enum fixed_addresses idx;
-       unsigned long vaddr, flags;
-       pte_t pte;
-
-       inc_preempt_count();
-
-       idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT;
-       vaddr = __fix_to_virt(FIX_CMAP_END - idx);
-       pte = mk_pte(page, PAGE_KERNEL);
-
-       local_irq_save(flags);
-       flush_tlb_one(get_asid(), vaddr);
-       local_irq_restore(flags);
-
-       update_mmu_cache(NULL, vaddr, pte);
-
-       set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
-
-       return (void *)vaddr;
-}
-
-static inline void kunmap_coherent(struct page *page)
-{
-       dec_preempt_count();
-       preempt_check_resched();
-}
-
-void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
-                      unsigned long vaddr, void *dst, const void *src,
-                      unsigned long len)
-{
-       if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) {
-               void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
-               memcpy(vto, src, len);
-               kunmap_coherent(vto);
-       } else {
-               memcpy(dst, src, len);
-               set_bit(PG_dcache_dirty, &page->flags);
-       }
-
-       if (vma->vm_flags & VM_EXEC)
-               flush_cache_page(vma, vaddr, page_to_pfn(page));
-}
-
-void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
-                        unsigned long vaddr, void *dst, const void *src,
-                        unsigned long len)
-{
-       if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) {
-               void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
-               memcpy(dst, vfrom, len);
-               kunmap_coherent(vfrom);
-       } else {
-               memcpy(dst, src, len);
-               set_bit(PG_dcache_dirty, &page->flags);
-       }
-}
-
-void copy_user_highpage(struct page *to, struct page *from,
-                       unsigned long vaddr, struct vm_area_struct *vma)
-{
-       void *vfrom, *vto;
-
-       vto = kmap_atomic(to, KM_USER1);
-
-       if (page_mapped(from) && !test_bit(PG_dcache_dirty, &from->flags)) {
-               vfrom = kmap_coherent(from, vaddr);
-               copy_page(vto, vfrom);
-               kunmap_coherent(vfrom);
-       } else {
-               vfrom = kmap_atomic(from, KM_USER0);
-               copy_page(vto, vfrom);
-               kunmap_atomic(vfrom, KM_USER0);
-       }
-
-       if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
-               __flush_wback_region(vto, PAGE_SIZE);
-
-       kunmap_atomic(vto, KM_USER1);
-       /* Make sure this page is cleared on other CPU's too before using it */
-       smp_wmb();
-}
-EXPORT_SYMBOL(copy_user_highpage);
-
-void clear_user_highpage(struct page *page, unsigned long vaddr)
-{
-       void *kaddr = kmap_atomic(page, KM_USER0);
-
-       clear_page(kaddr);
-
-       if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
-               __flush_wback_region(kaddr, PAGE_SIZE);
-
-       kunmap_atomic(kaddr, KM_USER0);
-}
-EXPORT_SYMBOL(clear_user_highpage);