/*
 *  arch/arm/include/asm/tlb.h
 *
 *  Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Experimentation shows that on a StrongARM, it appears to be faster
 *  to use the "invalidate whole tlb" rather than "invalidate single
 *  tlb" for this.
 *
 *  This appears true for both the process fork+exit case, as well as
 *  the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>

#define tlb_flush(tlb)	((void) tlb)

#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * We need to delay page freeing for SMP as other CPUs can access pages
 * which have been removed but not yet had their TLB entries invalidated.
 * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
 * we need to apply this same delaying tactic to ensure correct operation.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
#define tlb_fast_mode(tlb)	0
#else
#define tlb_fast_mode(tlb)	1
#endif
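
/*
 * Illustrative note (not in the original source): the macro above gates
 * the batching below.  With tlb_fast_mode(tlb) == 0, tlb_remove_page()
 * queues pages in tlb->pages[] and only frees them after the TLB has
 * been flushed; with fast mode, pages are freed immediately, since on a
 * pre-v7 uniprocessor no other CPU and no speculative walk can still be
 * using the stale mapping.
 */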

#define MMU_GATHER_BUNDLE	8

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		fullmm;		/* non-zero when tearing down the whole mm */
	struct vm_area_struct	*vma;		/* vma being unmapped, or NULL (see below) */
	unsigned long		range_start;	/* accumulated range still needing a TLB flush */
	unsigned long		range_end;
	unsigned int		nr;		/* pages currently queued in pages[] */
	unsigned int		max;		/* capacity of pages[] */
	struct page		**pages;	/* batch of pages to free after flushing */
	struct page		*local[MMU_GATHER_BUNDLE]; /* inline batch used as a fallback */
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * This is unnecessarily complex.  There are three ways the TLB shootdown
 * code is used:
 *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.
 *  2. Unmapping all vmas.  See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
 *  3. Unmapping argument pages.  See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 *     tlb->vma will be NULL.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || !tlb->vma)
		flush_tlb_mm(tlb->mm);
	else if (tlb->range_end > 0) {
		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}
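
/*
 * Illustrative summary (not in the original source) of how the three
 * cases above land in tlb_flush():
 *
 *	case 1 (range unmap):	fullmm == 0, vma set	 -> flush_tlb_range()
 *	case 2 (full unmap):	fullmm == 1		 -> flush_tlb_mm()
 *	case 3 (arg pages):	fullmm == 0, vma == NULL -> flush_tlb_mm()
 */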

static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
	if (!tlb->fullmm) {
		if (addr < tlb->range_start)
			tlb->range_start = addr;
		if (addr + PAGE_SIZE > tlb->range_end)
			tlb->range_end = addr + PAGE_SIZE;
	}
}
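
/*
 * Worked example (illustrative, assuming 4K pages): after tlb_start_vma()
 * the range is empty (range_start = TASK_SIZE, range_end = 0).  Removing
 * a pte at 0x8000 and then another at 0xa000 leaves range_start = 0x8000
 * and range_end = 0xb000, so a single flush_tlb_range() later covers
 * both pages.
 */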

static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(struct page *);
	}
}
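
/*
 * Note (added for clarity): if the GFP_NOWAIT allocation above fails,
 * tlb->pages simply keeps pointing at the embedded tlb->local[] bundle
 * (capacity MMU_GATHER_BUNDLE), so gathering still works, just with
 * more frequent flushes.
 */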

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush(tlb);
	if (!tlb_fast_mode(tlb)) {
		free_pages_and_swap_cache(tlb->pages, tlb->nr);
		tlb->nr = 0;
		if (tlb->pages == tlb->local)
			__tlb_alloc_page(tlb);
	}
}

static inline void
tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
{
	tlb->mm = mm;
	tlb->fullmm = fullmm;
	tlb->vma = NULL;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
	__tlb_alloc_page(tlb);
}

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}
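
/*
 * Illustrative lifecycle sketch (not part of this header): roughly how
 * a core-mm unmap path would drive this API.  The vma walk and the pte
 * loop are elided.
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, 0);		// partial unmap: fullmm = 0
 *	tlb_start_vma(&tlb, vma);		// flush caches, reset range
 *	for each pte being torn down:
 *		tlb_remove_tlb_entry(&tlb, ptep, addr);	// widen flush range
 *		tlb_remove_page(&tlb, page);		// queue page for freeing
 *	tlb_end_vma(&tlb, vma);			// flush the accumulated range
 *	tlb_finish_mmu(&tlb, start, end);	// final flush + free batched pages
 */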

/*
 * Memorize the range for the TLB flush.
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

/*
 * For TLB vma handling, we can optimise these away when we're doing a
 * full MM flush.  When we're doing a munmap, the vmas are adjusted to
 * only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
		tlb->vma = vma;
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		tlb_flush(tlb);
}

static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (tlb_fast_mode(tlb)) {
		free_page_and_swap_cache(page);
		return 1; /* avoid calling tlb_flush_mmu */
	}

	tlb->pages[tlb->nr++] = page;
	VM_BUG_ON(tlb->nr > tlb->max);
	return tlb->max - tlb->nr;
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (!__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}
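
/*
 * Illustrative note (hypothetical caller, not from this file):
 * __tlb_remove_page() returns the remaining batch capacity, so a caller
 * batching pages explicitly would follow the same pattern as
 * tlb_remove_page() above:
 *
 *	if (!__tlb_remove_page(tlb, page))
 *		tlb_flush_mmu(tlb);	// batch full: flush, free, refill
 */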

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				  unsigned long addr)
{
	pgtable_page_dtor(pte);

#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
#else
	/*
	 * With the classic ARM MMU, a pte page has two corresponding pmd
	 * entries, each covering 1MB.
	 */
	addr &= PMD_MASK;
	tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
	tlb_add_flush(tlb, addr + SZ_1M);
#endif

	tlb_remove_page(tlb, pte);
}
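
/*
 * Worked example (illustrative, assuming 4K pages and a 2MB pmd): for
 * addr = 0x00340000, addr &= PMD_MASK gives 0x00200000, so the two
 * tlb_add_flush() calls above record 0x002ff000 and 0x00300000, one
 * address in each 1MB section, ensuring the eventual range flush hits
 * entries mapped through both pmd slots.
 */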

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
				  unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
	tlb_add_flush(tlb, addr);
	tlb_remove_page(tlb, virt_to_page(pmdp));
#else
	/*
	 * With the classic MMU the pmd level is folded into the pgd, so
	 * there is no separate pmd page to free here.
	 */
#endif
}

#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)

#define tlb_migrate_finish(mm)		do { } while (0)

#endif /* CONFIG_MMU */
#endif