/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>

#include "mm.h"

#ifdef CONFIG_CPU_CACHE_VIPT
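
/*
 * Flush a user page via a temporary kernel alias mapped at the same
 * cache colour as "vaddr", so that the maintenance operations hit the
 * same VIPT D-cache lines as the user mapping would.  The MCRR
 * p15, 0, <end>, <start>, c14 is the ARMv6 "clean and invalidate
 * D-cache range" operation; the MCR c7, c10, 4 drains the write
 * buffer (a data synchronisation barrier).
 */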
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
}

static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)
{
	unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	unsigned long offset = vaddr & (PAGE_SIZE - 1);
	unsigned long to;

	set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));
	to = va + offset;
	flush_icache_range(to, to + len);
}
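
/*
 * With an aliasing VIPT D-cache there is no cheap way to find every
 * cache line belonging to "mm", so the MCR c7, c14, 0 below cleans
 * and invalidates the entire D-cache, followed by a drain-write-buffer
 * barrier.  VIVT caches take the vivt_flush_cache_mm() path instead,
 * and a non-aliasing VIPT D-cache needs no maintenance here.
 */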
49 | ||
d7b6b358 RK |
50 | void flush_cache_mm(struct mm_struct *mm) |
51 | { | |
52 | if (cache_is_vivt()) { | |
2f0b1926 | 53 | vivt_flush_cache_mm(mm); |
d7b6b358 RK |
54 | return; |
55 | } | |
56 | ||
57 | if (cache_is_vipt_aliasing()) { | |
58 | asm( "mcr p15, 0, %0, c7, c14, 0\n" | |
df71dfd4 | 59 | " mcr p15, 0, %0, c7, c10, 4" |
d7b6b358 RK |
60 | : |
61 | : "r" (0) | |
62 | : "cc"); | |
63 | } | |
64 | } | |
65 | ||
66 | void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) | |
67 | { | |
68 | if (cache_is_vivt()) { | |
2f0b1926 | 69 | vivt_flush_cache_range(vma, start, end); |
d7b6b358 RK |
70 | return; |
71 | } | |
72 | ||
73 | if (cache_is_vipt_aliasing()) { | |
74 | asm( "mcr p15, 0, %0, c7, c14, 0\n" | |
df71dfd4 | 75 | " mcr p15, 0, %0, c7, c10, 4" |
d7b6b358 RK |
76 | : |
77 | : "r" (0) | |
78 | : "cc"); | |
79 | } | |
9e95922b | 80 | |
6060e8df | 81 | if (vma->vm_flags & VM_EXEC) |
9e95922b | 82 | __flush_icache_all(); |
d7b6b358 RK |
83 | } |
84 | ||
85 | void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn) | |
86 | { | |
87 | if (cache_is_vivt()) { | |
2f0b1926 | 88 | vivt_flush_cache_page(vma, user_addr, pfn); |
d7b6b358 RK |
89 | return; |
90 | } | |
91 | ||
2df341ed | 92 | if (cache_is_vipt_aliasing()) { |
d7b6b358 | 93 | flush_pfn_alias(pfn, user_addr); |
2df341ed RK |
94 | __flush_icache_all(); |
95 | } | |
9e95922b RK |
96 | |
97 | if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged()) | |
98 | __flush_icache_all(); | |
d7b6b358 | 99 | } |
c4e259c8 | 100 | |
2ef7f3db | 101 | #else |
c4e259c8 WD |
102 | #define flush_pfn_alias(pfn,vaddr) do { } while (0) |
103 | #define flush_icache_alias(pfn,vaddr,len) do { } while (0) | |
2ef7f3db | 104 | #endif |
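
/*
 * On SMP systems whose cache maintenance operations are not broadcast
 * in hardware (cache_ops_need_broadcast()), the I-cache invalidation
 * performed by flush_ptrace_access() must be repeated on the other
 * CPUs via an IPI; flush_ptrace_access_other() is that IPI handler.
 */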
static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}

static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr, unsigned long len)
{
	if (cache_is_vivt()) {
		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing D-cache */
	if (vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		if (icache_is_vipt_aliasing())
			flush_icache_alias(page_to_pfn(page), uaddr, len);
		else
			__cpuc_coherent_kern_range(addr, addr + len);
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}
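
/*
 * Sketch of the expected caller, for illustration only: ptrace writes
 * reach copy_to_user_page() through access_process_vm(), which does
 * roughly
 *
 *	get_user_pages(...);			// pin the target page
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr,
 *			  maddr + offset, buf, len);
 *	set_page_dirty_lock(page);
 *	kunmap(page);
 */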

void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!PageHighMem(page)) {
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
	} else {
		void *addr = kmap_high_get(page);
		if (addr) {
			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
			kunmap_high(page);
		} else if (cache_is_vipt()) {
			/* unmapped pages might still be cached */
			addr = kmap_atomic(page);
			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
			kunmap_atomic(addr);
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_CACHE_SHIFT);
}

static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}
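
/*
 * Called from set_pte_at() when a valid user PTE is installed: lazily
 * write back the D-cache if the page was dirtied through the kernel
 * mapping (PG_dcache_clean unset), and invalidate the I-cache for
 * executable mappings.
 */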
225 | ||
6012191a CM |
226 | #if __LINUX_ARM_ARCH__ >= 6 |
227 | void __sync_icache_dcache(pte_t pteval) | |
228 | { | |
229 | unsigned long pfn; | |
230 | struct page *page; | |
231 | struct address_space *mapping; | |
232 | ||
6012191a CM |
233 | if (cache_is_vipt_nonaliasing() && !pte_exec(pteval)) |
234 | /* only flush non-aliasing VIPT caches for exec mappings */ | |
235 | return; | |
236 | pfn = pte_pfn(pteval); | |
237 | if (!pfn_valid(pfn)) | |
238 | return; | |
239 | ||
240 | page = pfn_to_page(pfn); | |
241 | if (cache_is_vipt_aliasing()) | |
242 | mapping = page_mapping(page); | |
243 | else | |
244 | mapping = NULL; | |
245 | ||
246 | if (!test_and_set_bit(PG_dcache_clean, &page->flags)) | |
247 | __flush_dcache_page(mapping, page); | |
8373dc38 | 248 | |
249 | if (pte_exec(pteval)) | |
6012191a CM |
250 | __flush_icache_all(); |
251 | } | |
252 | #endif | |
253 | ||
1da177e4 LT |
254 | /* |
255 | * Ensure cache coherency between kernel mapping and userspace mapping | |
256 | * of this page. | |
257 | * | |
258 | * We have three cases to consider: | |
259 | * - VIPT non-aliasing cache: fully coherent so nothing required. | |
260 | * - VIVT: fully aliasing, so we need to handle every alias in our | |
261 | * current VM view. | |
262 | * - VIPT aliasing: need to handle one alias in our current VM view. | |
263 | * | |
264 | * If we need to handle aliasing: | |
265 | * If the page only exists in the page cache and there are no user | |
266 | * space mappings, we can be lazy and remember that we may have dirty | |
267 | * kernel cache lines for later. Otherwise, we assume we have | |
268 | * aliasing mappings. | |
df2f5e72 | 269 | * |
31bee4cf | 270 | * Note that we disable the lazy flush for SMP configurations where |
271 | * the cache maintenance operations are not automatically broadcasted. | |
1da177e4 LT |
272 | */ |
273 | void flush_dcache_page(struct page *page) | |
274 | { | |
421fe93c RK |
275 | struct address_space *mapping; |
276 | ||
277 | /* | |
278 | * The zero page is never written to, so never has any dirty | |
279 | * cache lines, and therefore never needs to be flushed. | |
280 | */ | |
281 | if (page == ZERO_PAGE(0)) | |
282 | return; | |
283 | ||
284 | mapping = page_mapping(page); | |
1da177e4 | 285 | |
85848dd7 CM |
286 | if (!cache_ops_need_broadcast() && |
287 | mapping && !mapping_mapped(mapping)) | |
c0177800 | 288 | clear_bit(PG_dcache_clean, &page->flags); |
85848dd7 | 289 | else { |
1da177e4 | 290 | __flush_dcache_page(mapping, page); |
8830f04a RK |
291 | if (mapping && cache_is_vivt()) |
292 | __flush_dcache_aliases(mapping, page); | |
826cbdaf CM |
293 | else if (mapping) |
294 | __flush_icache_all(); | |
c0177800 | 295 | set_bit(PG_dcache_clean, &page->flags); |
8830f04a | 296 | } |
1da177e4 LT |
297 | } |
298 | EXPORT_SYMBOL(flush_dcache_page); | |
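
/*
 * Illustrative usage, e.g. for code writing to a page cache page
 * through its kernel mapping; flush_dcache_page() must be called
 * afterwards so that user mappings see the new data:
 *
 *	addr = kmap(page);
 *	memcpy(addr, buf, len);
 *	kunmap(page);
 *	flush_dcache_page(page);
 */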

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}