sh: rework nommu for generic cache.c use.
author    Paul Mundt <lethal@linux-sh.org>
          Sat, 15 Aug 2009 00:49:32 +0000 (09:49 +0900)
committer Paul Mundt <lethal@linux-sh.org>
          Sat, 15 Aug 2009 00:49:32 +0000 (09:49 +0900)
This does a bit of reorganizing to allow nommu to use the new and
generic cache.c; no functional changes.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
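
For background, PG_dcache_dirty is an alias for the generic PG_arch_1
page flag, now defined once in asm/cacheflush.h rather than duplicated
per CPU family. It implements the usual SH deferred D-cache writeback
idiom: mark a page when the kernel writes it through a kernel mapping,
and flush lazily when the page is next mapped into user space. A
minimal sketch of the pattern (the helper names here are illustrative,
not the exact kernel code):

	#include <linux/mm.h>
	#include <linux/page-flags.h>

	/* Sketch of the deferred D-cache flush idiom. */
	#define PG_dcache_dirty	PG_arch_1

	static void sketch_kernel_wrote_page(struct page *page)
	{
		/* Record the write; defer the actual cache flush. */
		set_bit(PG_dcache_dirty, &page->flags);
	}

	static void sketch_map_into_user(struct page *page)
	{
		/* Write back only if a deferred flush is pending. */
		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
			__flush_wback_region(page_address(page), PAGE_SIZE);
	}
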
arch/sh/include/asm/cacheflush.h
arch/sh/include/asm/page.h
arch/sh/include/cpu-sh3/cpu/cacheflush.h
arch/sh/include/cpu-sh4/cpu/cacheflush.h
arch/sh/kernel/cpu/init.c
arch/sh/mm/mmap.c
arch/sh/mm/tlb-nommu.c

index 4bf621e4146d9d551c861f87f028eb55826282da..9ec13fb909ddbbc6a973cbee768f552cc1728e89 100644
@@ -76,5 +76,7 @@ void kmap_coherent_init(void);
 void *kmap_coherent(struct page *page, unsigned long addr);
 void kunmap_coherent(void);
 
+#define PG_dcache_dirty        PG_arch_1
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHEFLUSH_H */
index 847eeabb9083fec300504cc7d14c0bbc0b1553a7..a316eeb50b39c8f004c69aea9224cf109edd183e 100644
@@ -68,18 +68,13 @@ extern void clear_user_page(void *to, unsigned long address, struct page *page);
 extern void copy_user_page(void *to, void *from, unsigned long address,
                           struct page *page);
 
-#elif defined(CONFIG_MMU)
+#else
 extern void copy_user_highpage(struct page *to, struct page *from,
                               unsigned long vaddr, struct vm_area_struct *vma);
 #define __HAVE_ARCH_COPY_USER_HIGHPAGE
 extern void clear_user_highpage(struct page *page, unsigned long vaddr);
 #define clear_user_highpage    clear_user_highpage
 
-#else
-
-#define clear_user_page(page, vaddr, pg)       clear_page(page)
-#define copy_user_page(to, from, vaddr, pg)    copy_page(to, from)
-
 #endif
 
 /*
index 6485ad5649adba34016fa1f95811d9868d1f2354..3b5f3df4e1c8a169b3e23000214ec27d995d75d7 100644
@@ -15,8 +15,6 @@
  * SH4. Unlike the SH4 this is a unified cache so we need to do some work
  * in mmap when 'exec'ing a new binary
  */
-#define PG_dcache_dirty        PG_arch_1
-
 void flush_cache_all(void);
 void flush_cache_mm(struct mm_struct *mm);
 #define flush_cache_dup_mm(mm) flush_cache_mm(mm)
index 3564f1722195f4fa9422dfacbd6f8af965e066b8..76764f0fb88aefe34de3ba9b3abf9e5148171730 100644
@@ -38,6 +38,4 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 /* Initialization of P3 area for copy_user_page */
 void p3_cache_init(void);
 
-#define PG_dcache_dirty        PG_arch_1
-
 #endif /* __ASM_CPU_SH4_CACHEFLUSH_H */
index ad85421099cdd49ba32b48c28065c90004e9a3a5..c832fa4cf8edf0b3bc5630416ecbe0a016e95156 100644
@@ -268,11 +268,9 @@ asmlinkage void __init sh_cpu_init(void)
        cache_init();
 
        if (raw_smp_processor_id() == 0) {
-#ifdef CONFIG_MMU
                shm_align_mask = max_t(unsigned long,
                                       current_cpu_data.dcache.way_size - 1,
                                       PAGE_SIZE - 1);
-#endif
 
                /* Boot CPU sets the cache shape */
                detect_cache_shape();
index 1b5fdfb4e0c2a98cb2a71b598e4d97ebc43bdd16..d2984fa42d3d0ee69fce3a3e0a7e4fa24323c835 100644
 #include <asm/page.h>
 #include <asm/processor.h>
 
-#ifdef CONFIG_MMU
 unsigned long shm_align_mask = PAGE_SIZE - 1;  /* Sane caches */
 EXPORT_SYMBOL(shm_align_mask);
 
+#ifdef CONFIG_MMU
 /*
  * To avoid cache aliases, we map the shared page with same color.
  */
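
The "same color" comment refers to cache coloring: with an aliasing
D-cache, arch_get_unmapped_area() places shared mappings so that the
chosen virtual address and the file offset agree modulo shm_align_mask
(set from the D-cache way size in sh_cpu_init() above). A rough sketch
of the alignment step, modeled on the COLOUR_ALIGN idiom used in this
file (the function form and name here are illustrative):

	/* Lowest address >= addr whose cache colour matches pgoff. */
	static unsigned long colour_align(unsigned long addr,
					  unsigned long pgoff)
	{
		unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
		unsigned long off  = (pgoff << PAGE_SHIFT) & shm_align_mask;

		return base + off;
	}
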
index c49e9d24c2e6fe7870719bfa6ea272eeed0b9dfb..0710ebb99b9a306427dbb6700a66e649e6b410bb 100644
@@ -50,11 +50,6 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
 }
 
-void __update_cache(struct vm_area_struct *vma,
-                   unsigned long address, pte_t pte)
-{
-}
-
 void __init kmap_coherent_init(void)
 {
 }