/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue-cache.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>
#include <asm/rodata.h>
#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
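/*
 * Illustrative example (not from the original header): with the usual ARM
 * SHMLBA of 4 * PAGE_SIZE (16KiB with 4KiB pages), user addresses 0x1000,
 * 0x2000, 0x3000 and 0x5000 have cache colours 1, 2, 3 and 1 respectively,
 * so only 0x1000 and 0x5000 can alias in a VIPT cache.
 */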
/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1
/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementation of these, and the required
 *	effects are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_icache_all()
 *
 *		Unconditionally clean and invalidate the entire icache.
 *		Currently only needed for cache-v6.S and cache-v7.S, see
 *		__flush_icache_all for the generic implementation.
 *
 *	flush_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_kern_louis()
 *
 *		Flush data cache levels up to the level of unification
 *		inner shareable and invalidate the I-cache.
 *		Only needed from v7 onwards, falls back to flush_cache_all()
 *		for all other processor versions.
 *
 *	flush_user_all()
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	coherent_user_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	flush_kern_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in page is written back.
 *		- kaddr  - page address
 *		- size   - region size
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 */
struct cpu_cache_fns {
	void (*flush_icache_all)(void);
	void (*flush_kern_all)(void);
	void (*flush_kern_louis)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	int  (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);

	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	void (*dma_flush_range)(const void *, const void *);
};
/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_kern_louis		cpu_cache.flush_kern_louis
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area			cpu_cache.dma_map_area
#define dmac_unmap_area			cpu_cache.dma_unmap_area
#define dmac_flush_range		cpu_cache.dma_flush_range
#else

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_kern_louis(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern int  __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);

#endif
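/*
 * Usage sketch (illustrative, not from the original header): drivers do not
 * call dmac_map_area()/dmac_unmap_area() directly; the dma-mapping API does
 * it for them, e.g.:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	// ... device reads from buf ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 *
 * where dma_map_single() cleans (DMA_TO_DEVICE) or invalidates
 * (DMA_FROM_DEVICE) the buffer through dmac_map_area() underneath.
 */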
/*
 * Copy user data from/to a page which is mapped into a different
 * processes address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
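/*
 * Usage sketch (illustrative, not from the original header): the generic
 * ptrace/uaccess code uses these when modifying another process's pages,
 * roughly as in mm/memory.c:
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
 *	kunmap(page);
 *
 * copy_to_user_page() must leave the target's instruction stream coherent,
 * which is why it is a real function here rather than a plain memcpy().
 */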
/*
 * Convert calls to our calling convention.
 */

/* Invalidate I-cache */
#ifdef CONFIG_ARM_ERRATA_831171
#define __flush_icache_all_generic()				\
	do {							\
		asm("mcr	p15, 0, %0, c7, c5, 0"		\
		    : : "r" (0));				\
		asm("mcr	p15, 0, %0, c7, c5, 0"		\
		    : : "r" (0));				\
	} while (0)
#else
#define __flush_icache_all_generic()				\
	asm("mcr	p15, 0, %0, c7, c5, 0"			\
	    : : "r" (0));
#endif
/* Invalidate I-cache inner shareable */
#ifdef CONFIG_ARM_ERRATA_831171
#define __flush_icache_all_v7_smp()				\
	do {							\
		asm("mcr	p15, 0, %0, c7, c1, 0"		\
		    : : "r" (0));				\
		asm("mcr	p15, 0, %0, c7, c1, 0"		\
		    : : "r" (0));				\
	} while (0)
#else
#define __flush_icache_all_v7_smp()				\
	asm("mcr	p15, 0, %0, c7, c1, 0"			\
	    : : "r" (0));
#endif
/*
 * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
 * will fall through to use __flush_icache_all_generic.
 */
#if (defined(CONFIG_CPU_V7) &&					\
     (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) ||	\
	defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define __flush_icache_preferred	__flush_icache_all_v7_smp
#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#else
#define __flush_icache_preferred	__flush_icache_all_generic
#endif
static inline void __flush_icache_all(void)
{
	__flush_icache_preferred();
}
/*
 * Flush caches up to Level of Unification Inner Shareable
 */
#define flush_cache_louis()		__cpuc_flush_kern_louis()

#define flush_cache_all()		__cpuc_flush_kern_all()
static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}
static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}
static inline void
vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}
#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
		vivt_flush_cache_page(vma,addr,pfn)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
#endif
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
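/*
 * Usage sketch (illustrative, not from the original header): a userspace
 * JIT reaches this through the private cacheflush syscall after emitting
 * code:
 *
 *	emit_code(buf, len);				// hypothetical helper
 *	syscall(__ARM_NR_cacheflush, buf, buf + len, 0);
 *
 * and the kernel applies flush_cache_user_range() to the validated range.
 */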
/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)
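/*
 * Usage sketch (illustrative, not from the original header): in-kernel code
 * generators must call this between writing instructions and executing them:
 *
 *	u32 *insn = buf;			// 'buf': hypothetical buffer
 *	*insn = opcode;				// written via the D-cache
 *	flush_icache_range((unsigned long)buf,
 *			   (unsigned long)buf + size);
 *	// only now may the CPU branch into 'buf'
 */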
/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)
/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
static inline void flush_kernel_vmap_range(void *addr, int size)
{
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
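/*
 * Usage sketch (illustrative, not from the original header): callers doing
 * I/O through a vmap() alias bracket the I/O with these helpers so the
 * alias and the linear mapping stay coherent:
 *
 *	void *va = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
 *	flush_kernel_vmap_range(va, len);	// push out data written via va
 *	// ... perform I/O on the underlying pages ...
 *	invalidate_kernel_vmap_range(va, len);	// drop stale lines before
 *	vunmap(va);				// reading what the device wrote
 */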
#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
extern void flush_kernel_dcache_page(struct page *);
#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)
/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)
/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
		 */
		dsb();
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
}
/*
 * Memory synchronization helpers for mixed cached vs non cached accesses.
 *
 * Some synchronization algorithms have to set states in memory with the
 * cache enabled or disabled depending on the code path.  It is crucial
 * to always ensure proper cache maintenance to update main memory right
 * away in that case.
 *
 * Any cached write must be followed by a cache clean operation.
 * Any cached read must be preceded by a cache invalidate operation.
 * Yet, in the read case, a cache flush i.e. atomic clean+invalidate
 * operation is needed to avoid discarding possible concurrent writes to the
 * accessed memory.
 *
 * Also, in order to prevent a cached writer from interfering with an
 * adjacent non-cached writer, each state variable must be located to
 * a separate cache line.
 */
/*
 * This needs to be >= the max cache writeback size of all
 * supported platforms included in the current kernel configuration.
 * This is used to align state variables to their own cache lines.
 */
#define __CACHE_WRITEBACK_ORDER 6  /* guessed from existing platforms */
#define __CACHE_WRITEBACK_GRANULE (1 << __CACHE_WRITEBACK_ORDER)
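/*
 * Usage sketch (illustrative, not from the original header): a state
 * variable shared between cached and non-cached writers gets its own
 * writeback granule, e.g.:
 *
 *	struct power_sync {			// hypothetical example
 *		int cpu_state;
 *	} __attribute__((__aligned__(__CACHE_WRITEBACK_GRANULE)));
 *
 * so cleaning the line(s) holding cpu_state can never write back stale
 * data over an adjacent variable updated with the cache off.
 */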
/*
 * There is no __cpuc_clean_dcache_area but we use it anyway for
 * code intent clarity, and alias it to __cpuc_flush_dcache_area.
 */
#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area
/*
 * Ensure preceding writes to *p by this CPU are visible to
 * subsequent reads by other CPUs:
 */
static inline void __sync_cache_range_w(volatile void *p, size_t size)
{
	char *_p = (char *)p;

	__cpuc_clean_dcache_area(_p, size);
	outer_clean_range(__pa(_p), __pa(_p + size));
}
/*
 * Ensure preceding writes to *p by other CPUs are visible to
 * subsequent reads by this CPU.  We must be careful not to
 * discard data simultaneously written by another CPU, hence the
 * usage of flush rather than invalidate operations.
 */
static inline void __sync_cache_range_r(volatile void *p, size_t size)
{
	char *_p = (char *)p;

#ifdef CONFIG_OUTER_CACHE
	if (outer_cache.flush_range) {
		/*
		 * Ensure dirty data migrated from other CPUs into our cache
		 * are cleaned out safely before the outer cache is cleaned:
		 */
		__cpuc_clean_dcache_area(_p, size);

		/* Clean and invalidate stale data for *p from outer ... */
		outer_flush_range(__pa(_p), __pa(_p + size));
	}
#endif

	/* ... and inner cache: */
	__cpuc_flush_dcache_area(_p, size);
}
#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
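/*
 * Usage sketch (illustrative, not from the original header): publishing a
 * state change from a CPU running with its cache on to one whose cache is
 * off, and reading it back:
 *
 *	power_state = STATE_DOWN;	// hypothetical variable/constant
 *	sync_cache_w(&power_state);	// writer: clean out to main memory
 *
 *	sync_cache_r(&power_state);	// reader: flush, then re-read the
 *	if (power_state == STATE_UP)	// value the other side published
 *		...;
 */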