[ARM] 4134/1: Add generic support for outer caches
author     Catalin Marinas <catalin.marinas@arm.com>
           Mon, 5 Feb 2007 13:48:08 +0000 (14:48 +0100)
committer  Russell King <rmk+kernel@arm.linux.org.uk>
           Thu, 8 Feb 2007 14:49:40 +0000 (14:49 +0000)
The outer cache can be L2, as on the RealView/EB MPCore platform, or even
L3 or further out on ARMv7 cores. This patch adds generic support for
flushing the outer cache in the DMA operations.
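
As an illustration only (not part of this patch): a minimal sketch of how a
platform with an L2 outer cache might hook into the new interface, assuming
it selects CONFIG_OUTER_CACHE. The l2x_*_range helpers and
l2x_outer_cache_init below are hypothetical placeholders for the platform's
real cache controller code, and registration via core_initcall is only one
possible choice:

  #include <linux/init.h>
  #include <asm/cacheflush.h>

  /* Hypothetical controller-specific routines; they receive physical addresses. */
  static void l2x_inv_range(unsigned long start, unsigned long end)
  {
          /* invalidate outer cache lines covering [start, end) */
  }

  static void l2x_clean_range(unsigned long start, unsigned long end)
  {
          /* clean (write back) outer cache lines covering [start, end) */
  }

  static void l2x_flush_range(unsigned long start, unsigned long end)
  {
          /* clean and invalidate outer cache lines covering [start, end) */
  }

  static int __init l2x_outer_cache_init(void)
  {
          /* populate the global hooks called by outer_*_range() */
          outer_cache.inv_range   = l2x_inv_range;
          outer_cache.clean_range = l2x_clean_range;
          outer_cache.flush_range = l2x_flush_range;
          return 0;
  }
  core_initcall(l2x_outer_cache_init);

With the hooks installed, the DMA paths below simply call outer_inv_range(),
outer_clean_range() or outer_flush_range() on the physical range (obtained
with __pa()); when CONFIG_OUTER_CACHE is not selected, these calls compile
to empty inline functions.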

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
arch/arm/common/dmabounce.c
arch/arm/kernel/setup.c
arch/arm/mm/Kconfig
arch/arm/mm/consistent.c
include/asm-arm/cacheflush.h

diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 272702accd8b259eee13cc2baa778e11ac3b1275..b4748e3171c6890b39e97aa239ac4dc3459148e7 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -338,6 +338,7 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                         */
                        ptr = (unsigned long)buf->ptr;
                        dmac_clean_range(ptr, ptr + size);
+                       outer_clean_range(__pa(ptr), __pa(ptr) + size);
                }
                free_safe_buffer(device_info, buf);
        }
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index bbab134cd82d3fa547d72d5d2514591da9f52d92..243aea45805740ec44e064e66ed0ec93e08cc487 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -88,6 +88,9 @@ struct cpu_user_fns cpu_user;
 #ifdef MULTI_CACHE
 struct cpu_cache_fns cpu_cache;
 #endif
+#ifdef CONFIG_OUTER_CACHE
+struct outer_cache_fns outer_cache;
+#endif
 
 struct stack {
        u32 irq[3];
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index aade2f72c9209f995b4f7e70461742610648bccf..a84eed9f8542167ea8d852bb0338fde34ff8cca7 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -609,3 +609,6 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
          Forget about fast user space cmpxchg support.
          It is just not possible.
 
+config OUTER_CACHE
+       bool
+       default n
diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/consistent.c
index 6a9c362fef5e241969c42c6133456fb901ce31a2..83bd035c7d5e8d4f359af40f79a33df95abf9b61 100644
--- a/arch/arm/mm/consistent.c
+++ b/arch/arm/mm/consistent.c
@@ -208,6 +208,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
                unsigned long kaddr = (unsigned long)page_address(page);
                memset(page_address(page), 0, size);
                dmac_flush_range(kaddr, kaddr + size);
+               outer_flush_range(__pa(kaddr), __pa(kaddr) + size);
        }
 
        /*
@@ -485,15 +486,20 @@ void consistent_sync(void *vaddr, size_t size, int direction)
        unsigned long start = (unsigned long)vaddr;
        unsigned long end   = start + size;
 
+       BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(end));
+
        switch (direction) {
        case DMA_FROM_DEVICE:           /* invalidate only */
                dmac_inv_range(start, end);
+               outer_inv_range(__pa(start), __pa(end));
                break;
        case DMA_TO_DEVICE:             /* writeback only */
                dmac_clean_range(start, end);
+               outer_clean_range(__pa(start), __pa(end));
                break;
        case DMA_BIDIRECTIONAL:         /* writeback and invalidate */
                dmac_flush_range(start, end);
+               outer_flush_range(__pa(start), __pa(end));
                break;
        default:
                BUG();
diff --git a/include/asm-arm/cacheflush.h b/include/asm-arm/cacheflush.h
index 5f531ea03059d003bab3e86643ed2580a36133cf..ce60b3702ba5588e742697610d1dd6d4885f96c6 100644
--- a/include/asm-arm/cacheflush.h
+++ b/include/asm-arm/cacheflush.h
@@ -190,6 +190,12 @@ struct cpu_cache_fns {
        void (*dma_flush_range)(unsigned long, unsigned long);
 };
 
+struct outer_cache_fns {
+       void (*inv_range)(unsigned long, unsigned long);
+       void (*clean_range)(unsigned long, unsigned long);
+       void (*flush_range)(unsigned long, unsigned long);
+};
+
 /*
  * Select the calling method
  */
@@ -246,6 +252,37 @@ extern void dmac_flush_range(unsigned long, unsigned long);
 
 #endif
 
+#ifdef CONFIG_OUTER_CACHE
+
+extern struct outer_cache_fns outer_cache;
+
+static inline void outer_inv_range(unsigned long start, unsigned long end)
+{
+       if (outer_cache.inv_range)
+               outer_cache.inv_range(start, end);
+}
+static inline void outer_clean_range(unsigned long start, unsigned long end)
+{
+       if (outer_cache.clean_range)
+               outer_cache.clean_range(start, end);
+}
+static inline void outer_flush_range(unsigned long start, unsigned long end)
+{
+       if (outer_cache.flush_range)
+               outer_cache.flush_range(start, end);
+}
+
+#else
+
+static inline void outer_inv_range(unsigned long start, unsigned long end)
+{ }
+static inline void outer_clean_range(unsigned long start, unsigned long end)
+{ }
+static inline void outer_flush_range(unsigned long start, unsigned long end)
+{ }
+
+#endif
+
 /*
  * flush_cache_vmap() is used when creating mappings (eg, via vmap,
  * vmalloc, ioremap etc) in kernel space for pages.  Since the