extern void *kmap_coherent(struct page *page, unsigned long addr);
extern void kunmap_coherent(void);
+#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+static inline void flush_kernel_dcache_page(struct page *page)
+{
+ BUG_ON(cpu_has_dc_aliases && PageHighMem(page));
+}
+
+/*
+ * For now flush_kernel_vmap_range and invalidate_kernel_vmap_range both do a
+ * cache writeback and invalidate operation.
+ */
+extern void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
+
+static inline void flush_kernel_vmap_range(void *vaddr, int size)
+{
+ if (cpu_has_dc_aliases)
+ __flush_kernel_vmap_range((unsigned long) vaddr, size);
+}
+
+static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+ if (cpu_has_dc_aliases)
+ __flush_kernel_vmap_range((unsigned long) vaddr, size);
+}
+
#endif /* _ASM_CACHEFLUSH_H */
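Not part of the patch: a minimal sketch of how callers are expected to use the
new interfaces, assuming a hypothetical driver (the example_* helpers below are
made up for illustration).  flush_kernel_dcache_page() is called after writing
to a page through a kmap() mapping, and flush_kernel_vmap_range() /
invalidate_kernel_vmap_range() bracket device I/O to a vmalloc()/vmap() buffer.

#include <linux/mm.h>
#include <linux/highmem.h>	/* pulls in asm/cacheflush.h */
#include <linux/string.h>

/* Fill a page through its kernel mapping, then push the writes past the
 * kernel-side alias before unmapping. */
static void example_fill_page(struct page *page, const void *src, size_t len)
{
	void *dst = kmap_atomic(page);

	memcpy(dst, src, len);
	flush_kernel_dcache_page(page);	/* no-op on MIPS apart from the BUG_ON */
	kunmap_atomic(dst);
}

/* Bracket device I/O to a vmalloc()/vmap() buffer.  On MIPS both calls
 * currently perform the same writeback-and-invalidate operation. */
static void example_io_to_vmapped_buffer(void *buf, int len)
{
	/* Write back dirty lines so the device sees current data. */
	flush_kernel_vmap_range(buf, len);

	/* ... start the device transfer on 'buf' and wait for it ... */

	/* Discard possibly stale lines before the CPU reads what the
	 * device wrote back. */
	invalidate_kernel_vmap_range(buf, len);
}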
octeon_flush_icache_all_cores(vma);
}
+/* Never reached: flush_kernel_vmap_range() only dispatches here when the
+ * CPU reports dcache aliases, which Octeon does not. */
+static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
+{
+ BUG();
+}
/**
* Probe Octeon's caches
flush_icache_range = octeon_flush_icache_range;
local_flush_icache_range = local_octeon_flush_icache_range;
+ __flush_kernel_vmap_range = octeon_flush_kernel_vmap_range;
+
build_clear_page();
build_copy_page();
}
write_c0_status(flags);
}
+/* Never reached: R3000-class cores do not report dcache aliases. */
+static void r3k_flush_kernel_vmap_range(unsigned long vaddr, int size)
+{
+ BUG();
+}
+
static void r3k_dma_cache_wback_inv(unsigned long start, unsigned long size)
{
/* Catch bad driver code */
flush_icache_range = r3k_flush_icache_range;
local_flush_icache_range = r3k_flush_icache_range;
+ __flush_kernel_vmap_range = r3k_flush_kernel_vmap_range;
+
flush_cache_sigtramp = r3k_flush_cache_sigtramp;
local_flush_data_cache_page = local_r3k_flush_data_cache_page;
flush_data_cache_page = r3k_flush_data_cache_page;
r4k_blast_icache();
}
+struct flush_kernel_vmap_range_args {
+ unsigned long vaddr;
+ int size;
+};
+
+static inline void local_r4k_flush_kernel_vmap_range(void *args)
+{
+ struct flush_kernel_vmap_range_args *vmra = args;
+ unsigned long vaddr = vmra->vaddr;
+ int size = vmra->size;
+
+ /*
+ * Aliases only affect the primary caches so don't bother with
+ * S-caches or T-caches.
+ */
+ if (cpu_has_safe_index_cacheops && size >= dcache_size)
+ r4k_blast_dcache();
+ else {
+ R4600_HIT_CACHEOP_WAR_IMPL;
+ blast_dcache_range(vaddr, vaddr + size);
+ }
+}
+
+static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
+{
+ struct flush_kernel_vmap_range_args args;
+
+ args.vaddr = vaddr;
+ args.size = size;
+
+ r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
+}
+
static inline void rm7k_erratum31(void)
{
const unsigned long ic_lsize = 32;
flush_cache_page = r4k_flush_cache_page;
flush_cache_range = r4k_flush_cache_range;
+ __flush_kernel_vmap_range = r4k_flush_kernel_vmap_range;
+
flush_cache_sigtramp = r4k_flush_cache_sigtramp;
flush_icache_all = r4k_flush_icache_all;
local_flush_data_cache_page = local_r4k_flush_data_cache_page;
}
}
+/* Never reached: TX39 cores do not report dcache aliases. */
+static void tx39_flush_kernel_vmap_range(unsigned long vaddr, int size)
+{
+ BUG();
+}
+
static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
unsigned long end;
flush_icache_range = tx39_flush_icache_range;
local_flush_icache_range = tx39_flush_icache_range;
+ __flush_kernel_vmap_range = tx39_flush_kernel_vmap_range;
+
flush_cache_sigtramp = tx39_flush_cache_sigtramp;
local_flush_data_cache_page = local_tx39_flush_data_cache_page;
flush_data_cache_page = tx39_flush_data_cache_page;
void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);
+void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
+
+EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
+
/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void * addr);