/*
 * arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_TLBFLUSH_H
#define _ASMARM_TLBFLUSH_H

#ifdef CONFIG_MMU

#include <asm/glue.h>

#define TLB_V3_PAGE (1 << 0)
#define TLB_V4_U_PAGE (1 << 1)
#define TLB_V4_D_PAGE (1 << 2)
#define TLB_V4_I_PAGE (1 << 3)
#define TLB_V6_U_PAGE (1 << 4)
#define TLB_V6_D_PAGE (1 << 5)
#define TLB_V6_I_PAGE (1 << 6)

#define TLB_V3_FULL (1 << 8)
#define TLB_V4_U_FULL (1 << 9)
#define TLB_V4_D_FULL (1 << 10)
#define TLB_V4_I_FULL (1 << 11)
#define TLB_V6_U_FULL (1 << 12)
#define TLB_V6_D_FULL (1 << 13)
#define TLB_V6_I_FULL (1 << 14)

#define TLB_V6_U_ASID (1 << 16)
#define TLB_V6_D_ASID (1 << 17)
#define TLB_V6_I_ASID (1 << 18)

/* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
#define TLB_V7_UIS_PAGE (1 << 19)
#define TLB_V7_UIS_FULL (1 << 20)
#define TLB_V7_UIS_ASID (1 << 21)

#define TLB_BARRIER (1 << 28)
#define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */
#define TLB_DCLEAN (1 << 30)
#define TLB_WB (1 << 31)

/*
 * MMU TLB Model
 * =============
 *
 * We have the following to choose from:
 * v3 - ARMv3
 * v4 - ARMv4 without write buffer
 * v4wb - ARMv4 with write buffer without I TLB flush entry instruction
 * v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
 * fr - Feroceon (v4wbi with non-outer-cacheable page table walks)
 * fa - Faraday (v4 with write buffer with UTLB)
 * v6wbi - ARMv6 with write buffer with I TLB flush entry instruction
 * v7wbi - identical to v6wbi
 */
#undef _TLB
#undef MULTI_TLB

#ifdef CONFIG_SMP_ON_UP
#define MULTI_TLB 1
#endif

#define v4_tlb_flags (TLB_V4_U_FULL | TLB_V4_U_PAGE)

#ifdef CONFIG_CPU_TLB_V4WT
# define v4_possible_flags v4_tlb_flags
# define v4_always_flags v4_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v4
# endif
#else
# define v4_possible_flags 0
# define v4_always_flags (-1UL)
#endif
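
/*
 * Illustrative sketch of how the selection above plays out (the configs
 * named here are only examples):
 *
 *      CONFIG_CPU_TLB_V4WT alone:
 *              _TLB == v4, so v4_possible_flags == v4_always_flags and
 *              every tlb_flag() test on them folds to a constant.
 *      CONFIG_CPU_TLB_V4WT plus a second model:
 *              the second block finds _TLB already defined, so
 *              MULTI_TLB == 1 and calls go through cpu_tlb instead.
 */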

#define fa_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
                      TLB_V4_U_FULL | TLB_V4_U_PAGE)

#ifdef CONFIG_CPU_TLB_FA
# define fa_possible_flags fa_tlb_flags
# define fa_always_flags fa_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB fa
# endif
#else
# define fa_possible_flags 0
# define fa_always_flags (-1UL)
#endif

#define v4wbi_tlb_flags (TLB_WB | TLB_DCLEAN | \
                         TLB_V4_I_FULL | TLB_V4_D_FULL | \
                         TLB_V4_I_PAGE | TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WBI
# define v4wbi_possible_flags v4wbi_tlb_flags
# define v4wbi_always_flags v4wbi_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v4wbi
# endif
#else
# define v4wbi_possible_flags 0
# define v4wbi_always_flags (-1UL)
#endif

#define fr_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_L2CLEAN_FR | \
                      TLB_V4_I_FULL | TLB_V4_D_FULL | \
                      TLB_V4_I_PAGE | TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_FEROCEON
# define fr_possible_flags fr_tlb_flags
# define fr_always_flags fr_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v4wbi /* Feroceon reuses the v4wbi TLB flush entry points */
# endif
#else
# define fr_possible_flags 0
# define fr_always_flags (-1UL)
#endif

#define v4wb_tlb_flags (TLB_WB | TLB_DCLEAN | \
                        TLB_V4_I_FULL | TLB_V4_D_FULL | \
                        TLB_V4_D_PAGE)

#ifdef CONFIG_CPU_TLB_V4WB
# define v4wb_possible_flags v4wb_tlb_flags
# define v4wb_always_flags v4wb_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v4wb
# endif
#else
# define v4wb_possible_flags 0
# define v4wb_always_flags (-1UL)
#endif

#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
                         TLB_V6_I_FULL | TLB_V6_D_FULL | \
                         TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
                         TLB_V6_I_ASID | TLB_V6_D_ASID)

#ifdef CONFIG_CPU_TLB_V6
# define v6wbi_possible_flags v6wbi_tlb_flags
# define v6wbi_always_flags v6wbi_tlb_flags
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v6wbi
# endif
#else
# define v6wbi_possible_flags 0
# define v6wbi_always_flags (-1UL)
#endif

#define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
                             TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
#define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
                            TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)

#ifdef CONFIG_CPU_TLB_V7

# ifdef CONFIG_SMP_ON_UP
# define v7wbi_possible_flags (v7wbi_tlb_flags_smp | v7wbi_tlb_flags_up)
# define v7wbi_always_flags (v7wbi_tlb_flags_smp & v7wbi_tlb_flags_up)
# elif defined(CONFIG_SMP)
# define v7wbi_possible_flags v7wbi_tlb_flags_smp
# define v7wbi_always_flags v7wbi_tlb_flags_smp
# else
# define v7wbi_possible_flags v7wbi_tlb_flags_up
# define v7wbi_always_flags v7wbi_tlb_flags_up
# endif
# ifdef _TLB
# define MULTI_TLB 1
# else
# define _TLB v7wbi
# endif
#else
# define v7wbi_possible_flags 0
# define v7wbi_always_flags (-1UL)
#endif

#ifndef _TLB
#error Unknown TLB model
#endif

#ifndef __ASSEMBLY__

#include <linux/sched.h>

struct cpu_tlb_fns {
        void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
        void (*flush_kern_range)(unsigned long, unsigned long);
        unsigned long tlb_flags;
};

/*
 * Select the calling method
 */
#ifdef MULTI_TLB

#define __cpu_flush_user_tlb_range cpu_tlb.flush_user_range
#define __cpu_flush_kern_tlb_range cpu_tlb.flush_kern_range

#else

#define __cpu_flush_user_tlb_range __glue(_TLB,_flush_user_tlb_range)
#define __cpu_flush_kern_tlb_range __glue(_TLB,_flush_kern_tlb_range)

extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);

#endif

extern struct cpu_tlb_fns cpu_tlb;

#define __cpu_tlb_flags cpu_tlb.tlb_flags

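/*
 * Illustrative expansion: with a single TLB model such as _TLB == v6wbi,
 *
 *      __cpu_flush_user_tlb_range(start, end, vma)
 *
 * glues directly to v6wbi_flush_user_tlb_range(start, end, vma),
 * implemented in arch/arm/mm/tlb-v6.S; with MULTI_TLB it is instead an
 * indirect call through cpu_tlb.flush_user_range.
 */
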
/*
 * TLB Management
 * ==============
 *
 * The arch/arm/mm/tlb-*.S files implement these methods.
 *
 * The TLB-specific code is expected to perform whatever tests it
 * needs to determine if it should invalidate the TLB for each
 * call. Start addresses are inclusive and end addresses are
 * exclusive; it is safe to round these addresses down.
 *
 * flush_tlb_all()
 *
 * Invalidate the entire TLB.
 *
 * flush_tlb_mm(mm)
 *
 * Invalidate all TLB entries in a particular address
 * space.
 * - mm - mm_struct describing address space
 *
 * flush_tlb_range(vma,start,end)
 *
 * Invalidate a range of TLB entries in the specified
 * address space.
 * - vma - vm_area_struct describing address range
 * - start - start address (may not be aligned)
 * - end - end address (exclusive, may not be aligned)
 *
 * flush_tlb_page(vma,vaddr)
 *
 * Invalidate the specified page in the specified address range.
 * - vma - vm_area_struct describing address range
 * - vaddr - virtual address (may not be aligned)
 *
 * flush_tlb_kernel_page(kaddr)
 *
 * Invalidate the TLB entry for the specified page. The address
 * will be in the kernel's virtual memory space. Current uses
 * only require the D-TLB to be invalidated.
 * - kaddr - Kernel virtual memory address
 */
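
/*
 * Usage sketch (illustrative only; addr, ptep and pteval are
 * hypothetical locals): after updating a user PTE, invalidate the
 * stale translation for that page,
 *
 *      set_pte_at(vma->vm_mm, addr, ptep, pteval);
 *      flush_tlb_page(vma, addr);
 *
 * and after modifying kernel mappings, flush the affected range:
 *
 *      flush_tlb_kernel_range(start, end);
 */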

/*
 * We optimise the code below by:
 * - building a set of TLB flags that might be set in __cpu_tlb_flags
 * - building a set of TLB flags that will always be set in __cpu_tlb_flags
 * - if we're going to need __cpu_tlb_flags, access it once and only once
 *
 * This allows us to build optimal assembly for the single-CPU type case,
 * and as close to optimal as the compiler constraints allow for the
 * multi-CPU case. We could do better for the multi-CPU case if the
 * compiler implemented the "%?" method, but this has been discontinued
 * due to too many people getting it wrong.
 */
#define possible_tlb_flags (v4_possible_flags | \
                            v4wbi_possible_flags | \
                            fr_possible_flags | \
                            v4wb_possible_flags | \
                            fa_possible_flags | \
                            v6wbi_possible_flags | \
                            v7wbi_possible_flags)

#define always_tlb_flags (v4_always_flags & \
                          v4wbi_always_flags & \
                          fr_always_flags & \
                          v4wb_always_flags & \
                          fa_always_flags & \
                          v6wbi_always_flags & \
                          v7wbi_always_flags)

#define tlb_flag(f) ((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))

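/*
 * Worked example (illustrative): on a kernel with only CONFIG_CPU_TLB_V6
 * selected, possible_tlb_flags and always_tlb_flags both reduce to
 * v6wbi_tlb_flags, so tlb_flag(TLB_V6_D_PAGE) is a non-zero compile-time
 * constant and the test disappears. On a multi-TLB kernel, flags outside
 * the intersection leave the first operand zero and the cached
 * __tlb_flag is tested at run time.
 */
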
#define __tlb_op(f, insnarg, arg) \
        do { \
                if (always_tlb_flags & (f)) \
                        asm("mcr " insnarg \
                            : : "r" (arg) : "cc"); \
                else if (possible_tlb_flags & (f)) \
                        asm("tst %1, %2\n\t" \
                            "mcrne " insnarg \
                            : : "r" (arg), "r" (__tlb_flag), "Ir" (f) \
                            : "cc"); \
        } while (0)

#define tlb_op(f, regs, arg) __tlb_op(f, "p15, 0, %0, " regs, arg)
#define tlb_l2_op(f, regs, arg) __tlb_op(f, "p15, 1, %0, " regs, arg)

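/*
 * Illustrative expansion (assuming TLB_V6_U_FULL is possible but not
 * always set): tlb_op(TLB_V6_U_FULL, "c8, c7, 0", zero) emits
 *
 *      tst     %1, %2                  @ test __tlb_flag for TLB_V6_U_FULL
 *      mcrne   p15, 0, %0, c8, c7, 0   @ invalidate unified TLB if set
 *
 * so an operation the current CPU doesn't need costs one test and a
 * skipped conditional instruction.
 */
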
static inline void local_flush_tlb_all(void)
{
        const int zero = 0;
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        if (tlb_flag(TLB_WB))
                dsb();

        tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
        tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
        tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
        tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
        tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero);

        if (tlb_flag(TLB_BARRIER)) {
                dsb();
                isb();
        }
}

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
        const int zero = 0;
        const int asid = ASID(mm);
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        if (tlb_flag(TLB_WB))
                dsb();

        if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
                if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
                        tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
                        tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
                        tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
                        tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
                }
                put_cpu();
        }

        tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid);
        tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid);
        tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid);
#ifdef CONFIG_ARM_ERRATA_720789
        tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", zero);
#else
        tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", asid);
#endif

        if (tlb_flag(TLB_BARRIER))
                dsb();
}

static inline void
local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
        const int zero = 0;
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);

        if (tlb_flag(TLB_WB))
                dsb();

        if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
            cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr);
                tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
                tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr);
                tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr);
                if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
                        asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");
        }

        tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr);
        tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr);
        tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr);
#ifdef CONFIG_ARM_ERRATA_720789
        tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK);
#else
        tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", uaddr);
#endif

        if (tlb_flag(TLB_BARRIER))
                dsb();
}

static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
{
        const int zero = 0;
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        kaddr &= PAGE_MASK;

        if (tlb_flag(TLB_WB))
                dsb();

        tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr);
        tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
        tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
        tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
        if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL))
                asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc");

        tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr);
        tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr);
        tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr);
        tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr);

        if (tlb_flag(TLB_BARRIER)) {
                dsb();
                isb();
        }
}

/*
 * flush_pmd_entry
 *
 * Flush a PMD entry (word aligned, or double-word aligned) to
 * RAM if the TLB for the CPU we are running on requires this.
 * This is typically used when we are creating PMD entries.
 *
 * clean_pmd_entry
 *
 * Clean (but don't drain the write buffer) if the CPU requires
 * these operations. This is typically used when we are removing
 * PMD entries.
 */
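
/*
 * Usage sketch (illustrative; pmdp, phys and prot are hypothetical):
 * when installing a section mapping, the new descriptor must reach RAM
 * before the hardware page table walker can see it, so the store is
 * paired with a flush:
 *
 *      *pmdp = __pmd(phys | prot);
 *      flush_pmd_entry(pmdp);
 */
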
static inline void flush_pmd_entry(void *pmd)
{
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        tlb_op(TLB_DCLEAN, "c7, c10, 1 @ flush_pmd", pmd);
        tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd);

        if (tlb_flag(TLB_WB))
                dsb();
}

static inline void clean_pmd_entry(void *pmd)
{
        const unsigned int __tlb_flag = __cpu_tlb_flags;

        tlb_op(TLB_DCLEAN, "c7, c10, 1 @ flush_pmd", pmd);
        tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd);
}

#undef tlb_op
#undef tlb_l2_op
#undef tlb_flag
#undef always_tlb_flags
#undef possible_tlb_flags

/*
 * Convert calls to our calling convention.
 */
#define local_flush_tlb_range(vma,start,end) __cpu_flush_user_tlb_range(start,end,vma)
#define local_flush_tlb_kernel_range(s,e) __cpu_flush_kern_tlb_range(s,e)

#ifndef CONFIG_SMP
#define flush_tlb_all local_flush_tlb_all
#define flush_tlb_mm local_flush_tlb_mm
#define flush_tlb_page local_flush_tlb_page
#define flush_tlb_kernel_page local_flush_tlb_kernel_page
#define flush_tlb_range local_flush_tlb_range
#define flush_tlb_kernel_range local_flush_tlb_kernel_range
#else
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
extern void flush_tlb_kernel_page(unsigned long kaddr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
#endif
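
/*
 * On SMP the out-of-line versions live in arch/arm/kernel/smp_tlb.c and
 * broadcast the operation when the hardware cannot, along the lines of
 * this simplified sketch (not the exact implementation):
 *
 *      void flush_tlb_all(void)
 *      {
 *              if (tlb_ops_need_broadcast())
 *                      on_each_cpu(ipi_flush_tlb_all, NULL, 1);
 *              else
 *                      local_flush_tlb_all();
 *      }
 */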

/*
 * If PG_dcache_clean is not set for the page, we need to ensure that any
 * cache entries for the kernel's virtual memory range are written
 * back to the page. On ARMv6 and later, the cache coherency is handled via
 * the set_pte_at() function.
 */
#if __LINUX_ARM_ARCH__ < 6
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
                             pte_t *ptep);
#else
static inline void update_mmu_cache(struct vm_area_struct *vma,
                                    unsigned long addr, pte_t *ptep)
{
}
#endif

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif