/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>

#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
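
/*
 * Editorial sketch: CACHE_COLOUR() returns which of the SHMLBA/PAGE_SIZE
 * possible cache "colours" a virtual address falls into on an aliasing
 * VIPT cache.  Two mappings of the same physical page can leave stale
 * aliases only when their colours differ, e.g. (hypothetical helper,
 * not part of this header):
 *
 *	static inline int example_mappings_may_alias(unsigned long va1,
 *						     unsigned long va2)
 *	{
 *		return CACHE_COLOUR(va1) != CACHE_COLOUR(va2);
 *	}
 */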

/*
 *	Cache Model
 *	===========
 */
#undef _CACHE
#undef MULTI_CACHE

#if defined(CONFIG_CPU_CACHE_V3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v3
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4
# endif
#endif

#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
    defined(CONFIG_CPU_ARM1026)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_FA526)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE fa
# endif
#endif

#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm926
# endif
#endif

#if defined(CONFIG_CPU_ARM940T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm940
# endif
#endif

#if defined(CONFIG_CPU_ARM946E)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm946
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4WB)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4wb
# endif
#endif

#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xscale
# endif
#endif

#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xsc3
# endif
#endif

#if defined(CONFIG_CPU_MOHAWK)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE mohawk
# endif
#endif

#if defined(CONFIG_CPU_FEROCEON)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_V6)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_V7)
# define MULTI_CACHE 1
#endif

#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif

/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1

/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementation of these, and the required
 *	effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_user_all()
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	coherent_user_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	flush_kern_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in the page is written back.
 *		- kaddr - page address
 *		- size  - region size
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start - virtual start address
 *		- end   - virtual end address
 */

struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);

	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	void (*dma_flush_range)(const void *, const void *);
};

struct outer_cache_fns {
	void (*inv_range)(unsigned long, unsigned long);
	void (*clean_range)(unsigned long, unsigned long);
	void (*flush_range)(unsigned long, unsigned long);
};

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area			cpu_cache.dma_map_area
#define dmac_unmap_area			cpu_cache.dma_unmap_area
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)

extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area			__glue(_CACHE,_dma_map_area)
#define dmac_unmap_area			__glue(_CACHE,_dma_unmap_area)
#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)

extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);

#endif
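
/*
 * Editorial note: __glue() (from <asm/glue.h>) pastes the selected
 * model name into each symbol, so a single-model ARM926 build resolves
 *
 *	__cpuc_flush_kern_all
 *		=> __glue(arm926,_flush_kern_cache_all)
 *		=> arm926_flush_kern_cache_all
 *
 * while a MULTI_CACHE build calls through cpu_cache, a table the boot
 * code fills in from the selected processor's description.
 */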

#ifdef CONFIG_OUTER_CACHE

extern struct outer_cache_fns outer_cache;

static inline void outer_inv_range(unsigned long start, unsigned long end)
{
	if (outer_cache.inv_range)
		outer_cache.inv_range(start, end);
}
static inline void outer_clean_range(unsigned long start, unsigned long end)
{
	if (outer_cache.clean_range)
		outer_cache.clean_range(start, end);
}
static inline void outer_flush_range(unsigned long start, unsigned long end)
{
	if (outer_cache.flush_range)
		outer_cache.flush_range(start, end);
}

#else

static inline void outer_inv_range(unsigned long start, unsigned long end)
{ }
static inline void outer_clean_range(unsigned long start, unsigned long end)
{ }
static inline void outer_flush_range(unsigned long start, unsigned long end)
{ }

#endif
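
/*
 * Editorial sketch (hypothetical caller, assumes a lowmem buffer):
 * inner and outer maintenance must be ordered.  Before DMA to a device,
 * clean the inner cache first so evicted lines reach the outer cache
 * before it in turn is cleaned to memory; note the outer_*_range()
 * hooks take physical addresses:
 *
 *	static void example_clean_for_device(void *buf, size_t len)
 *	{
 *		unsigned long paddr = __pa(buf);
 *
 *		dmac_map_area(buf, len, DMA_TO_DEVICE);
 *		outer_clean_range(paddr, paddr + len);
 *	}
 */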

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
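
/*
 * Editorial note: the expected caller is the generic ptrace access path
 * (see access_process_vm() in mm/memory.c), e.g. for a write, roughly:
 *
 *	copy_to_user_page(vma, page, addr, maddr, buf, len);
 *	set_page_dirty_lock(page);
 *
 * The out-of-line ARM copy_to_user_page() also performs the D/I-cache
 * maintenance that the plain memcpy() used for reads can omit.
 */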

/*
 * Convert calls to our calling convention.
 */
#define flush_cache_all()		__cpuc_flush_kern_all()

static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}

static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
		vivt_flush_cache_page(vma,addr,pfn)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(vma,start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)
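
/*
 * Editorial sketch (hypothetical helper): any code that writes
 * instructions through a kernel mapping, e.g. a module loader or JIT,
 * must make them visible to the I-cache before executing them:
 *
 *	static void example_publish_code(void *code, size_t len)
 *	{
 *		unsigned long start = (unsigned long)code;
 *
 *		flush_icache_range(start, start + len);
 *	}
 */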

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
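
/*
 * Editorial sketch (hypothetical): a filesystem read path that fills a
 * page-cache page through its kernel mapping would publish it like so:
 *
 *	static void example_fill_page(struct page *page, const void *src)
 *	{
 *		void *dst = kmap(page);
 *
 *		memcpy(dst, src, PAGE_SIZE);
 *		kunmap(page);
 *		flush_dcache_page(page);
 *	}
 */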

static inline void __flush_icache_all(void)
{
#ifdef CONFIG_ARM_ERRATA_411920
	extern void v6_icache_inval_all(void);
	v6_icache_inval_all();
#else
	asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache\n"
	    :
	    : "r" (0));
#endif
}

static inline void flush_kernel_vmap_range(void *addr, int size)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing())
		__cpuc_flush_dcache_area(addr, (size_t)size);
}

static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing())
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
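
/*
 * Editorial sketch: Documentation/cachetlb.txt describes the intended
 * usage - bracket device I/O to a vmap()ed buffer so the aliased kernel
 * mapping is written back before the device reads it, and stale lines
 * are discarded before the CPU reads the result:
 *
 *	flush_kernel_vmap_range(vaddr, len);
 *	... device reads/writes the buffer ...
 *	invalidate_kernel_vmap_range(vaddr, len);
 */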

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
	/* highmem pages are always flushed upon kunmap already */
	if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
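
/*
 * Editorial sketch (hypothetical): per Documentation/cachetlb.txt, a
 * driver that modifies a user page through a kmap() alias flushes the
 * kernel mapping before dropping it:
 *
 *	void *vaddr = kmap(page);
 *	memset(vaddr, 0, PAGE_SIZE);
 *	flush_kernel_dcache_page(page);
 *	kunmap(page);
 */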

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
		 */
		dsb();
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
}
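
/*
 * Editorial note: drivers do not call these two hooks directly; the
 * core vmalloc code invokes flush_cache_vmap() once new PTEs are
 * written (see vmap_page_range() in mm/vmalloc.c), and the unmap paths
 * call flush_cache_vunmap() before tearing the mappings down.
 */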

#endif