/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue-cache.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#include <asm/outercache.h>
#include <asm/rodata.h>

#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
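
/*
 * Worked example (illustrative): asm/shmparam.h defines SHMLBA as
 * 4 * PAGE_SIZE on ARM, so with 4K pages CACHE_COLOUR() extracts
 * virtual address bits [13:12]:
 *
 *	CACHE_COLOUR(0x00001000) == 1
 *	CACHE_COLOUR(0x00004000) == 0
 *
 * On a VIPT aliasing cache, two virtual mappings of the same physical
 * page can land on the same cache lines only if their colours match.
 */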

/*
 * This flag is used to indicate that the page pointed to by a pte is clean
 * and does not require cleaning before returning it to the user.
 */
#define PG_dcache_clean PG_arch_1

/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementations of these, and their required
 *	effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_icache_all()
 *
 *		Unconditionally clean and invalidate the entire icache.
 *		Currently only needed for cache-v6.S and cache-v7.S, see
 *		__flush_icache_all for the generic implementation.
 *
 *	flush_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_kern_louis()
 *
 *		Flush data cache levels up to the level of unification
 *		inner shareable and invalidate the I-cache.
 *		Only needed from v7 onwards, falls back to flush_cache_all()
 *		for all other processor versions.
 *
 *	flush_user_all()
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	coherent_user_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	flush_kern_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in the page is written back.
 *		- kaddr - page address
 *		- size  - region size
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start - virtual start address
 *		- end   - virtual end address
 */

struct cpu_cache_fns {
	void (*flush_icache_all)(void);
	void (*flush_kern_all)(void);
	void (*flush_kern_louis)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	int  (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);

	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	void (*dma_flush_range)(const void *, const void *);
};

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_kern_louis		cpu_cache.flush_kern_louis
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area			cpu_cache.dma_map_area
#define dmac_unmap_area			cpu_cache.dma_unmap_area
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_kern_louis(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern int  __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);

#endif
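
/*
 * To make the dispatch above concrete (a sketch, assuming a v7-only
 * kernel for the single-cache case): with MULTI_CACHE, a call such as
 *
 *	__cpuc_flush_kern_all();
 *
 * is an indirect call through cpu_cache.flush_kern_all, which is
 * populated at boot from the matched proc_info entry.  Without
 * MULTI_CACHE, asm/glue-cache.h resolves the same name at compile time
 * to the one cache implementation built in, e.g.
 * v7_flush_kern_cache_all.
 */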

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
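
/*
 * These are used when the kernel writes another task's pages on its
 * behalf, e.g. access_process_vm() for ptrace.  An illustrative call,
 * following the shape of mm/memory.c:
 *
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, len);
 *
 * Unlike a plain memcpy(), copy_to_user_page() must also keep the I/D
 * caches coherent, since ptrace commonly plants breakpoint
 * instructions this way.
 */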

/*
 * Convert calls to our calling convention.
 */

/* Invalidate I-cache */
#ifdef CONFIG_ARM_ERRATA_831171
#define __flush_icache_all_generic()				\
	do {							\
		asm("mcr	p15, 0, %0, c7, c5, 0"		\
		    : : "r" (0));				\
		asm("mcr	p15, 0, %0, c7, c5, 0"		\
		    : : "r" (0));				\
	} while (0)
#else
#define __flush_icache_all_generic()				\
	asm("mcr	p15, 0, %0, c7, c5, 0"			\
	    : : "r" (0));
#endif

/* Invalidate I-cache inner shareable */
#ifdef CONFIG_ARM_ERRATA_831171
#define __flush_icache_all_v7_smp()				\
	do {							\
		asm("mcr	p15, 0, %0, c7, c1, 0"		\
		    : : "r" (0));				\
		asm("mcr	p15, 0, %0, c7, c1, 0"		\
		    : : "r" (0));				\
	} while (0)
#else
#define __flush_icache_all_v7_smp()				\
	asm("mcr	p15, 0, %0, c7, c1, 0"			\
	    : : "r" (0));
#endif

/*
 * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
 * will fall through to use __flush_icache_all_generic.
 */
#if (defined(CONFIG_CPU_V7) &&					\
     (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) ||	\
	defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define __flush_icache_preferred	__flush_icache_all_v7_smp
#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#else
#define __flush_icache_preferred	__flush_icache_all_generic
#endif

static inline void __flush_icache_all(void)
{
	__flush_icache_preferred();
	dsb();
}

/*
 * Flush caches up to Level of Unification Inner Shareable
 */
#define flush_cache_louis()		__cpuc_flush_kern_louis()

#define flush_cache_all()		__cpuc_flush_kern_all()

static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}

static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;

	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
		vivt_flush_cache_page(vma,addr,pfn)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
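
/*
 * From user space this is reached through the ARM private cacheflush
 * syscall.  A minimal, illustrative JIT-style sequence (user-space C):
 *
 *	memcpy(code, insns, len);			// emit instructions
 *	syscall(__ARM_NR_cacheflush, code, code + len, 0);
 *	((void (*)(void))code)();			// now safe to execute
 */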

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
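
/*
 * In outline (a sketch of the arch/arm/mm/flush.c logic, not a
 * definition), the deferral described above works roughly as:
 *
 *	if (mapping && !page_mapped(page))
 *		clear_bit(PG_dcache_clean, &page->flags);	// defer
 *	else {
 *		__flush_dcache_page(mapping, page);		// flush now
 *		set_bit(PG_dcache_clean, &page->flags);
 *	}
 *
 * update_mmu_cache() then checks PG_dcache_clean and performs any
 * deferred clean before the mapping reaches user space.
 */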

static inline void flush_kernel_vmap_range(void *addr, int size)
{
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
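
/*
 * These are for callers that perform I/O on memory they also access
 * through a vmap() alias (see Documentation/cachetlb.txt; XFS is the
 * classic example).  Illustrative ordering around a DMA transfer:
 *
 *	flush_kernel_vmap_range(vaddr, size);	   // before the device reads
 *	... DMA ...
 *	invalidate_kernel_vmap_range(vaddr, size); // before the CPU reads
 */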

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
extern void flush_kernel_dcache_page(struct page *);

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
		 */
		dsb();
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
}

/*
 * Memory synchronization helpers for mixed cached vs non-cached accesses.
 *
 * Some synchronization algorithms have to set states in memory with the
 * cache enabled or disabled depending on the code path.  It is crucial
 * to always ensure proper cache maintenance to update main memory right
 * away in that case.
 *
 * Any cached write must be followed by a cache clean operation.
 * Any cached read must be preceded by a cache invalidate operation.
 * Yet, in the read case, a cache flush i.e. atomic clean+invalidate
 * operation is needed to avoid discarding possible concurrent writes to the
 * accessed memory.
 *
 * Also, in order to prevent a cached writer from interfering with an
 * adjacent non-cached writer, each state variable must be located in
 * a separate cache line.
 */

/*
 * This needs to be >= the max cache writeback size of all
 * supported platforms included in the current kernel configuration.
 * This is used to align state variables to their own cache lines.
 */
#define __CACHE_WRITEBACK_ORDER		6  /* guessed from existing platforms */
#define __CACHE_WRITEBACK_GRANULE	(1 << __CACHE_WRITEBACK_ORDER)
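
/*
 * Illustrative use: a state variable shared with a CPU running with
 * its cache off should be aligned (and padded) to a writeback granule,
 * so that cleaning it cannot write back unrelated neighbouring data:
 *
 *	static int power_state
 *		__attribute__((aligned(__CACHE_WRITEBACK_GRANULE)));
 *
 * (power_state is a made-up name for illustration.)
 */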

/*
 * There is no __cpuc_clean_dcache_area but we use it anyway for
 * code intent clarity, and alias it to __cpuc_flush_dcache_area.
 */
#define __cpuc_clean_dcache_area __cpuc_flush_dcache_area

/*
 * Ensure preceding writes to *p by this CPU are visible to
 * subsequent reads by other CPUs:
 */
static inline void __sync_cache_range_w(volatile void *p, size_t size)
{
	char *_p = (char *)p;

	__cpuc_clean_dcache_area(_p, size);
	outer_clean_range(__pa(_p), __pa(_p + size));
}

/*
 * Ensure preceding writes to *p by other CPUs are visible to
 * subsequent reads by this CPU.  We must be careful not to
 * discard data simultaneously written by another CPU, hence the
 * usage of flush rather than invalidate operations.
 */
static inline void __sync_cache_range_r(volatile void *p, size_t size)
{
	char *_p = (char *)p;

#ifdef CONFIG_OUTER_CACHE
	if (outer_cache.flush_range) {
		/*
		 * Ensure dirty data migrated from other CPUs into our cache
		 * are cleaned out safely before the outer cache is cleaned:
		 */
		__cpuc_clean_dcache_area(_p, size);

		/* Clean and invalidate stale data for *p from outer ... */
		outer_flush_range(__pa(_p), __pa(_p + size));
	}
#endif

	/* ... and inner cache: */
	__cpuc_flush_dcache_area(_p, size);
}

#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
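
/*
 * Typical (illustrative) pairing, in the style of the MCPM low-level
 * power code: a CPU with its cache on publishes a state change with
 * sync_cache_w(); a CPU that may hold a stale copy (or no copy at all)
 * picks it up with sync_cache_r():
 *
 *	writer:	cluster_state = CLUSTER_DOWN;
 *		sync_cache_w(&cluster_state);
 *
 *	reader:	sync_cache_r(&cluster_state);
 *		if (cluster_state == CLUSTER_DOWN) ...
 *
 * cluster_state is a made-up variable; each such variable must occupy
 * its own __CACHE_WRITEBACK_GRANULE-sized line (see above).
 */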

#endif