/*
 * Cache maintenance
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>

#include "proc-macros.S"

/*
 * __flush_dcache_all()
 *
 * Flush the whole D-cache.
 *
 * Corrupted registers: x0-x7, x9-x11
 */
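/*
 * Note: LoC (Level of Coherency) lives in CLIDR_EL1[26:24]. Shifting
 * right by 23 rather than 24 leaves (LoC << 1) in x3, which matches
 * the (level << 1) encoding used both for the cache-level counter x10
 * and for CSSELR_EL1 below.
 */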
ENTRY(__flush_dcache_all)
	dsb	sy			// ensure ordering with previous memory accesses
	mrs	x0, clidr_el1		// read clidr
	and	x3, x0, #0x7000000	// extract loc from clidr
	lsr	x3, x3, #23		// left align loc bit field
	cbz	x3, finished		// if loc is 0, then no need to clean
	mov	x10, #0			// start clean at cache level 0
loop1:
	add	x2, x10, x10, lsr #1	// work out 3x current cache level
	lsr	x1, x0, x2		// extract cache type bits from clidr
	and	x1, x1, #7		// mask off the bits for the current cache only
	cmp	x1, #2			// see what cache we have at this level
	b.lt	skip			// skip if no cache, or just i-cache
	save_and_disable_irqs x9	// make CSSELR and CCSIDR access atomic
	msr	csselr_el1, x10		// select current cache level in csselr
	isb				// isb to sync the new csselr & ccsidr
	mrs	x1, ccsidr_el1		// read the new ccsidr
	restore_irqs x9
	and	x2, x1, #7		// extract the length of the cache lines
	add	x2, x2, #4		// add 4 (line length offset)
	mov	x4, #0x3ff
	and	x4, x4, x1, lsr #3	// find the maximum way number (ways - 1)
	clz	w5, w4			// find bit position of way size increment
	mov	x7, #0x7fff
	and	x7, x7, x1, lsr #13	// extract the maximum index number (sets - 1)
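	// CCSIDR_EL1 fields used above: LineSize = bits[2:0], with
	// log2(bytes per line) = LineSize + 4; Associativity - 1 =
	// bits[12:3]; NumSets - 1 = bits[27:13]. Worked example for a
	// 32KB, 4-way cache with 64-byte lines: LineSize = 2 (so
	// x2 = 6), x4 = 3 (ways - 1), x7 = 127 (sets - 1).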
loop2:
	mov	x9, x4			// create working copy of max way size
loop3:
	lsl	x6, x9, x5
	orr	x11, x10, x6		// factor way and cache number into x11
	lsl	x6, x7, x2
	orr	x11, x11, x6		// factor index number into x11
	dc	cisw, x11		// clean & invalidate by set/way
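	// x11 follows the set/way operand encoding from the ARMv8 ARM:
	// x11 = (way << x5) | (index << x2) | (level << 1), with the
	// way placed in the top bits (x5 = 32 - ceil(log2(ways))) and
	// the set index shifted by log2 of the line size.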
	subs	x9, x9, #1		// decrement the way
	b.ge	loop3
	subs	x7, x7, #1		// decrement the index
	b.ge	loop2
skip:
	add	x10, x10, #2		// increment cache number
	cmp	x3, x10
	b.gt	loop1
finished:
	mov	x10, #0			// switch back to cache level 0
	msr	csselr_el1, x10		// select current cache level in csselr
	dsb	sy
	isb
	ret
ENDPROC(__flush_dcache_all)

/*
 * flush_cache_all()
 *
 * Flush the entire cache system.  The data cache flush is now achieved
 * using atomic clean / invalidates working outwards from the L1 cache.
 * This is done using Set/Way based cache maintenance instructions.  The
 * instruction cache can still be invalidated back to the point of
 * unification in a single instruction.
 */
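/*
 * Note: Set/Way maintenance operations are not broadcast to other
 * CPUs, so the D-side flush only affects the caches of the CPU
 * executing it; "ic ialluis" below, by contrast, operates on the
 * whole Inner Shareable domain.
 */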
ENTRY(flush_cache_all)
	mov	x12, lr
	bl	__flush_dcache_all
	mov	x0, #0
	ic	ialluis			// I+BTB cache invalidate
	ret	x12
ENDPROC(flush_cache_all)

/*
 * flush_icache_range(start,end)
 *
 * Ensure that the I and D caches are coherent within the specified
 * region.  This is typically used when code has been written to a
 * memory region and will be executed.
 *
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
ENTRY(flush_icache_range)
	/* FALLTHROUGH */

/*
 * __flush_cache_user_range(start,end)
 *
 * Ensure that the I and D caches are coherent within the specified
 * region.  This is typically used when code has been written to a
 * memory region and will be executed.
 *
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
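/*
 * Worked example of the alignment below, assuming 64-byte cache
 * lines: x3 = 63, so "bic x4, x0, x3" rounds a start address of
 * 0x40001234 down to 0x40001200, ensuring a partially covered first
 * line is still maintained.
 */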
ENTRY(__flush_cache_user_range)
	dcache_line_size x2, x3
	sub	x3, x2, #1
	bic	x4, x0, x3
1:
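	// With CONFIG_ARM_ERRATA_824069, the workaround below issues a
	// clean & invalidate to the PoC in place of the plain clean to
	// the PoU.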
#ifdef CONFIG_ARM_ERRATA_824069
USER(9f, dc	civac, x4 )		// clean & invalidate D line / unified line
#else
USER(9f, dc	cvau, x4 )		// clean D line to PoU
#endif
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	1b
	dsb	sy

	icache_line_size x2, x3
	sub	x3, x2, #1
	bic	x4, x0, x3
1:
USER(9f, ic	ivau, x4 )		// invalidate I line to PoU
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	1b
9:					// ignore any faulting cache operation
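	// USER() registers each cache op above in the exception table,
	// so a fault on a user address resumes here and the operation
	// is simply abandoned.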
	dsb	sy
	isb
	ret
ENDPROC(flush_icache_range)
ENDPROC(__flush_cache_user_range)

/*
 * __flush_dcache_area(kaddr, size)
 *
 * Ensure that the data held in the region [kaddr, kaddr + size) is
 * written back to memory.
 *
 * - kaddr - kernel virtual start address of region
 * - size  - size of region in bytes
 */
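/*
 * Roughly equivalent C, as an illustrative sketch only (linesz stands
 * for the line size read by dcache_line_size, p for the loop cursor):
 *
 *	for (p = kaddr & ~(linesz - 1); p < kaddr + size; p += linesz)
 *		asm volatile("dc civac, %0" : : "r" (p));
 *	asm volatile("dsb sy");
 */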
ENTRY(__flush_dcache_area)
	dcache_line_size x2, x3
	add	x1, x0, x1
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	civac, x0		// clean & invalidate D line / unified line
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__flush_dcache_area)

/*
 * __dma_inv_range(start, end)
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
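/*
 * Unaligned start/end lines are cleaned & invalidated ("dc civac")
 * rather than just invalidated, so that unrelated data sharing those
 * lines is not discarded; fully covered interior lines only need
 * "dc ivac".  E.g. with 64-byte lines, a buffer at 0x1010..0x1078
 * shares its first and last lines with neighbouring data.
 */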
__dma_inv_range:
	dcache_line_size x2, x3
	sub	x3, x2, #1
	tst	x1, x3			// end cache line aligned?
	bic	x1, x1, x3
	b.eq	1f
	dc	civac, x1		// clean & invalidate D / U line
1:	tst	x0, x3			// start cache line aligned?
	bic	x0, x0, x3
	b.eq	2f
	dc	civac, x0		// clean & invalidate D / U line
	b	3f
2:	dc	ivac, x0		// invalidate D / U line
3:	add	x0, x0, x2
	cmp	x0, x1
	b.lo	2b
	dsb	sy
	ret
ENDPROC(__dma_inv_range)

/*
 * __dma_clean_range(start, end)
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
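/*
 * As in __flush_cache_user_range, the CONFIG_ARM_ERRATA_824069
 * workaround below substitutes a clean & invalidate to the PoC for
 * the plain clean.
 */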
ENTRY(__dma_clean_range)
	dcache_line_size x2, x3
	sub	x3, x2, #1
	bic	x0, x0, x3
#ifdef CONFIG_ARM_ERRATA_824069
1:	dc	civac, x0		// clean & invalidate D / U line
#else
1:	dc	cvac, x0		// clean D / U line
#endif
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__dma_clean_range)

/*
 * __dma_flush_range(start, end)
 * - start - virtual start address of region
 * - end   - virtual end address of region
 */
ENTRY(__dma_flush_range)
	dcache_line_size x2, x3
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	civac, x0		// clean & invalidate D / U line
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__dma_flush_range)

/*
 * __dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
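/*
 * For DMA_FROM_DEVICE the device will write to the buffer, so stale
 * lines are invalidated; for the other directions the CPU's dirty
 * lines are cleaned out to the PoC so the device sees them.
 */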
ENTRY(__dma_map_area)
	add	x1, x1, x0
	cmp	w2, #DMA_FROM_DEVICE
	b.eq	__dma_inv_range
	b	__dma_clean_range
ENDPROC(__dma_map_area)

/*
 * __dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
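/*
 * Nothing to do for DMA_TO_DEVICE on unmap; for the other directions,
 * any lines speculatively fetched while the device owned the buffer
 * are invalidated before the CPU reads it.
 */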
ENTRY(__dma_unmap_area)
	add	x1, x1, x0
	cmp	w2, #DMA_TO_DEVICE
	b.ne	__dma_inv_range
	ret
ENDPROC(__dma_unmap_area)