/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
8c369264 | 19 | #include <linux/err.h> |
382266ad | 20 | #include <linux/init.h> |
07620976 | 21 | #include <linux/spinlock.h> |
fced80c7 | 22 | #include <linux/io.h> |
8c369264 RH |
23 | #include <linux/of.h> |
24 | #include <linux/of_address.h> | |
382266ad CM |
25 | |
26 | #include <asm/cacheflush.h> | |
382266ad CM |
27 | #include <asm/hardware/cache-l2x0.h> |
28 | ||
29 | #define CACHE_LINE_SIZE 32 | |
30 | ||
31 | static void __iomem *l2x0_base; | |
07620976 | 32 | static DEFINE_SPINLOCK(l2x0_lock); |
64039be8 | 33 | static uint32_t l2x0_way_mask; /* Bitmask of active ways */ |
5ba70372 | 34 | static uint32_t l2x0_size; |
382266ad | 35 | |
9a6655e4 | 36 | static inline void cache_wait_way(void __iomem *reg, unsigned long mask) |
382266ad | 37 | { |
9a6655e4 | 38 | /* wait for cache operation by line or way to complete */ |
6775a558 | 39 | while (readl_relaxed(reg) & mask) |
1caf3092 | 40 | cpu_relax(); |
382266ad CM |
41 | } |
42 | ||
9a6655e4 CM |
#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310: no polling needed */
}
#else
/* older L2x0 parts must poll after each line operation */
#define cache_wait	cache_wait_way
#endif
51 | ||
382266ad CM |
52 | static inline void cache_sync(void) |
53 | { | |
3d107434 | 54 | void __iomem *base = l2x0_base; |
885028e4 SK |
55 | |
56 | #ifdef CONFIG_ARM_ERRATA_753970 | |
57 | /* write to an unmmapped register */ | |
58 | writel_relaxed(0, base + L2X0_DUMMY_REG); | |
59 | #else | |
6775a558 | 60 | writel_relaxed(0, base + L2X0_CACHE_SYNC); |
885028e4 | 61 | #endif |
3d107434 | 62 | cache_wait(base + L2X0_CACHE_SYNC, 1); |
382266ad CM |
63 | } |
64 | ||
424d6b14 SS |
65 | static inline void l2x0_clean_line(unsigned long addr) |
66 | { | |
67 | void __iomem *base = l2x0_base; | |
68 | cache_wait(base + L2X0_CLEAN_LINE_PA, 1); | |
6775a558 | 69 | writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA); |
424d6b14 SS |
70 | } |
71 | ||
72 | static inline void l2x0_inv_line(unsigned long addr) | |
73 | { | |
74 | void __iomem *base = l2x0_base; | |
75 | cache_wait(base + L2X0_INV_LINE_PA, 1); | |
6775a558 | 76 | writel_relaxed(addr, base + L2X0_INV_LINE_PA); |
424d6b14 SS |
77 | } |
78 | ||
#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)

/* Errata workarounds toggle the debug control register around operations */
#define debug_writel(val)	outer_cache.set_debug(val)

static void l2x0_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define l2x0_set_debug	NULL
#endif
9e65582a | 95 | |
2839e06c | 96 | #ifdef CONFIG_PL310_ERRATA_588369 |
9e65582a SS |
97 | static inline void l2x0_flush_line(unsigned long addr) |
98 | { | |
99 | void __iomem *base = l2x0_base; | |
100 | ||
101 | /* Clean by PA followed by Invalidate by PA */ | |
102 | cache_wait(base + L2X0_CLEAN_LINE_PA, 1); | |
6775a558 | 103 | writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA); |
9e65582a | 104 | cache_wait(base + L2X0_INV_LINE_PA, 1); |
6775a558 | 105 | writel_relaxed(addr, base + L2X0_INV_LINE_PA); |
9e65582a SS |
106 | } |
107 | #else | |
108 | ||
424d6b14 SS |
109 | static inline void l2x0_flush_line(unsigned long addr) |
110 | { | |
111 | void __iomem *base = l2x0_base; | |
112 | cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); | |
6775a558 | 113 | writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA); |
424d6b14 | 114 | } |
9e65582a | 115 | #endif |
424d6b14 | 116 | |
23107c54 CM |
117 | static void l2x0_cache_sync(void) |
118 | { | |
119 | unsigned long flags; | |
120 | ||
121 | spin_lock_irqsave(&l2x0_lock, flags); | |
122 | cache_sync(); | |
123 | spin_unlock_irqrestore(&l2x0_lock, flags); | |
124 | } | |
125 | ||
38a8914f | 126 | static void __l2x0_flush_all(void) |
2fd86589 | 127 | { |
2839e06c | 128 | debug_writel(0x03); |
2fd86589 TG |
129 | writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY); |
130 | cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask); | |
131 | cache_sync(); | |
2839e06c | 132 | debug_writel(0x00); |
38a8914f WD |
133 | } |
134 | ||
135 | static void l2x0_flush_all(void) | |
136 | { | |
137 | unsigned long flags; | |
138 | ||
139 | /* clean all ways */ | |
140 | spin_lock_irqsave(&l2x0_lock, flags); | |
141 | __l2x0_flush_all(); | |
2fd86589 TG |
142 | spin_unlock_irqrestore(&l2x0_lock, flags); |
143 | } | |
144 | ||
444457c1 SS |
145 | static void l2x0_clean_all(void) |
146 | { | |
147 | unsigned long flags; | |
148 | ||
149 | /* clean all ways */ | |
150 | spin_lock_irqsave(&l2x0_lock, flags); | |
151 | writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY); | |
152 | cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask); | |
153 | cache_sync(); | |
154 | spin_unlock_irqrestore(&l2x0_lock, flags); | |
155 | } | |
156 | ||
2fd86589 | 157 | static void l2x0_inv_all(void) |
382266ad | 158 | { |
0eb948dd RK |
159 | unsigned long flags; |
160 | ||
382266ad | 161 | /* invalidate all ways */ |
0eb948dd | 162 | spin_lock_irqsave(&l2x0_lock, flags); |
2fd86589 TG |
163 | /* Invalidating when L2 is enabled is a nono */ |
164 | BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1); | |
6775a558 | 165 | writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY); |
9a6655e4 | 166 | cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask); |
382266ad | 167 | cache_sync(); |
0eb948dd | 168 | spin_unlock_irqrestore(&l2x0_lock, flags); |
382266ad CM |
169 | } |
170 | ||
171 | static void l2x0_inv_range(unsigned long start, unsigned long end) | |
172 | { | |
3d107434 | 173 | void __iomem *base = l2x0_base; |
0eb948dd | 174 | unsigned long flags; |
382266ad | 175 | |
0eb948dd | 176 | spin_lock_irqsave(&l2x0_lock, flags); |
4f6627ac RS |
177 | if (start & (CACHE_LINE_SIZE - 1)) { |
178 | start &= ~(CACHE_LINE_SIZE - 1); | |
9e65582a | 179 | debug_writel(0x03); |
424d6b14 | 180 | l2x0_flush_line(start); |
9e65582a | 181 | debug_writel(0x00); |
4f6627ac RS |
182 | start += CACHE_LINE_SIZE; |
183 | } | |
184 | ||
185 | if (end & (CACHE_LINE_SIZE - 1)) { | |
186 | end &= ~(CACHE_LINE_SIZE - 1); | |
9e65582a | 187 | debug_writel(0x03); |
424d6b14 | 188 | l2x0_flush_line(end); |
9e65582a | 189 | debug_writel(0x00); |
4f6627ac RS |
190 | } |
191 | ||
0eb948dd RK |
192 | while (start < end) { |
193 | unsigned long blk_end = start + min(end - start, 4096UL); | |
194 | ||
195 | while (start < blk_end) { | |
424d6b14 | 196 | l2x0_inv_line(start); |
0eb948dd RK |
197 | start += CACHE_LINE_SIZE; |
198 | } | |
199 | ||
200 | if (blk_end < end) { | |
201 | spin_unlock_irqrestore(&l2x0_lock, flags); | |
202 | spin_lock_irqsave(&l2x0_lock, flags); | |
203 | } | |
204 | } | |
3d107434 | 205 | cache_wait(base + L2X0_INV_LINE_PA, 1); |
382266ad | 206 | cache_sync(); |
0eb948dd | 207 | spin_unlock_irqrestore(&l2x0_lock, flags); |
382266ad CM |
208 | } |
209 | ||
210 | static void l2x0_clean_range(unsigned long start, unsigned long end) | |
211 | { | |
3d107434 | 212 | void __iomem *base = l2x0_base; |
0eb948dd | 213 | unsigned long flags; |
382266ad | 214 | |
444457c1 SS |
215 | if ((end - start) >= l2x0_size) { |
216 | l2x0_clean_all(); | |
217 | return; | |
218 | } | |
219 | ||
0eb948dd | 220 | spin_lock_irqsave(&l2x0_lock, flags); |
382266ad | 221 | start &= ~(CACHE_LINE_SIZE - 1); |
0eb948dd RK |
222 | while (start < end) { |
223 | unsigned long blk_end = start + min(end - start, 4096UL); | |
224 | ||
225 | while (start < blk_end) { | |
424d6b14 | 226 | l2x0_clean_line(start); |
0eb948dd RK |
227 | start += CACHE_LINE_SIZE; |
228 | } | |
229 | ||
230 | if (blk_end < end) { | |
231 | spin_unlock_irqrestore(&l2x0_lock, flags); | |
232 | spin_lock_irqsave(&l2x0_lock, flags); | |
233 | } | |
234 | } | |
3d107434 | 235 | cache_wait(base + L2X0_CLEAN_LINE_PA, 1); |
382266ad | 236 | cache_sync(); |
0eb948dd | 237 | spin_unlock_irqrestore(&l2x0_lock, flags); |
382266ad CM |
238 | } |
239 | ||
240 | static void l2x0_flush_range(unsigned long start, unsigned long end) | |
241 | { | |
3d107434 | 242 | void __iomem *base = l2x0_base; |
0eb948dd | 243 | unsigned long flags; |
382266ad | 244 | |
444457c1 SS |
245 | if ((end - start) >= l2x0_size) { |
246 | l2x0_flush_all(); | |
247 | return; | |
248 | } | |
249 | ||
0eb948dd | 250 | spin_lock_irqsave(&l2x0_lock, flags); |
382266ad | 251 | start &= ~(CACHE_LINE_SIZE - 1); |
0eb948dd RK |
252 | while (start < end) { |
253 | unsigned long blk_end = start + min(end - start, 4096UL); | |
254 | ||
9e65582a | 255 | debug_writel(0x03); |
0eb948dd | 256 | while (start < blk_end) { |
424d6b14 | 257 | l2x0_flush_line(start); |
0eb948dd RK |
258 | start += CACHE_LINE_SIZE; |
259 | } | |
9e65582a | 260 | debug_writel(0x00); |
0eb948dd RK |
261 | |
262 | if (blk_end < end) { | |
263 | spin_unlock_irqrestore(&l2x0_lock, flags); | |
264 | spin_lock_irqsave(&l2x0_lock, flags); | |
265 | } | |
266 | } | |
3d107434 | 267 | cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); |
382266ad | 268 | cache_sync(); |
0eb948dd | 269 | spin_unlock_irqrestore(&l2x0_lock, flags); |
382266ad CM |
270 | } |
271 | ||
2fd86589 TG |
272 | static void l2x0_disable(void) |
273 | { | |
274 | unsigned long flags; | |
275 | ||
276 | spin_lock_irqsave(&l2x0_lock, flags); | |
38a8914f WD |
277 | __l2x0_flush_all(); |
278 | writel_relaxed(0, l2x0_base + L2X0_CTRL); | |
279 | dsb(); | |
2fd86589 TG |
280 | spin_unlock_irqrestore(&l2x0_lock, flags); |
281 | } | |
282 | ||
bac7e6ec LW |
283 | static void __init l2x0_unlock(__u32 cache_id) |
284 | { | |
285 | int lockregs; | |
286 | int i; | |
287 | ||
288 | if (cache_id == L2X0_CACHE_ID_PART_L310) | |
289 | lockregs = 8; | |
290 | else | |
291 | /* L210 and unknown types */ | |
292 | lockregs = 1; | |
293 | ||
294 | for (i = 0; i < lockregs; i++) { | |
295 | writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE + | |
296 | i * L2X0_LOCKDOWN_STRIDE); | |
297 | writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE + | |
298 | i * L2X0_LOCKDOWN_STRIDE); | |
299 | } | |
300 | } | |
301 | ||
382266ad CM |
302 | void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) |
303 | { | |
304 | __u32 aux; | |
64039be8 | 305 | __u32 cache_id; |
5ba70372 | 306 | __u32 way_size = 0; |
64039be8 JM |
307 | int ways; |
308 | const char *type; | |
382266ad CM |
309 | |
310 | l2x0_base = base; | |
311 | ||
6775a558 CM |
312 | cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); |
313 | aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); | |
64039be8 | 314 | |
4082cfa7 SH |
315 | aux &= aux_mask; |
316 | aux |= aux_val; | |
317 | ||
64039be8 JM |
318 | /* Determine the number of ways */ |
319 | switch (cache_id & L2X0_CACHE_ID_PART_MASK) { | |
320 | case L2X0_CACHE_ID_PART_L310: | |
321 | if (aux & (1 << 16)) | |
322 | ways = 16; | |
323 | else | |
324 | ways = 8; | |
325 | type = "L310"; | |
326 | break; | |
327 | case L2X0_CACHE_ID_PART_L210: | |
328 | ways = (aux >> 13) & 0xf; | |
329 | type = "L210"; | |
330 | break; | |
331 | default: | |
332 | /* Assume unknown chips have 8 ways */ | |
333 | ways = 8; | |
334 | type = "L2x0 series"; | |
335 | break; | |
336 | } | |
337 | ||
338 | l2x0_way_mask = (1 << ways) - 1; | |
339 | ||
5ba70372 SS |
340 | /* |
341 | * L2 cache Size = Way size * Number of ways | |
342 | */ | |
343 | way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17; | |
344 | way_size = 1 << (way_size + 3); | |
345 | l2x0_size = ways * way_size * SZ_1K; | |
346 | ||
48371cd3 SK |
347 | /* |
348 | * Check if l2x0 controller is already enabled. | |
349 | * If you are booting from non-secure mode | |
350 | * accessing the below registers will fault. | |
351 | */ | |
6775a558 | 352 | if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) { |
bac7e6ec LW |
353 | /* Make sure that I&D is not locked down when starting */ |
354 | l2x0_unlock(cache_id); | |
382266ad | 355 | |
48371cd3 | 356 | /* l2x0 controller is disabled */ |
6775a558 | 357 | writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL); |
382266ad | 358 | |
48371cd3 SK |
359 | l2x0_inv_all(); |
360 | ||
361 | /* enable L2X0 */ | |
6775a558 | 362 | writel_relaxed(1, l2x0_base + L2X0_CTRL); |
48371cd3 | 363 | } |
382266ad CM |
364 | |
365 | outer_cache.inv_range = l2x0_inv_range; | |
366 | outer_cache.clean_range = l2x0_clean_range; | |
367 | outer_cache.flush_range = l2x0_flush_range; | |
23107c54 | 368 | outer_cache.sync = l2x0_cache_sync; |
2fd86589 TG |
369 | outer_cache.flush_all = l2x0_flush_all; |
370 | outer_cache.inv_all = l2x0_inv_all; | |
371 | outer_cache.disable = l2x0_disable; | |
2839e06c | 372 | outer_cache.set_debug = l2x0_set_debug; |
382266ad | 373 | |
64039be8 | 374 | printk(KERN_INFO "%s cache controller enabled\n", type); |
5ba70372 SS |
375 | printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n", |
376 | ways, cache_id, aux, l2x0_size); | |
382266ad | 377 | } |
8c369264 RH |
378 | |
379 | #ifdef CONFIG_OF | |
380 | static void __init l2x0_of_setup(const struct device_node *np, | |
381 | __u32 *aux_val, __u32 *aux_mask) | |
382 | { | |
383 | u32 data[2] = { 0, 0 }; | |
384 | u32 tag = 0; | |
385 | u32 dirty = 0; | |
386 | u32 val = 0, mask = 0; | |
387 | ||
388 | of_property_read_u32(np, "arm,tag-latency", &tag); | |
389 | if (tag) { | |
390 | mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK; | |
391 | val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT; | |
392 | } | |
393 | ||
394 | of_property_read_u32_array(np, "arm,data-latency", | |
395 | data, ARRAY_SIZE(data)); | |
396 | if (data[0] && data[1]) { | |
397 | mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK | | |
398 | L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK; | |
399 | val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) | | |
400 | ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT); | |
401 | } | |
402 | ||
403 | of_property_read_u32(np, "arm,dirty-latency", &dirty); | |
404 | if (dirty) { | |
405 | mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK; | |
406 | val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT; | |
407 | } | |
408 | ||
409 | *aux_val &= ~mask; | |
410 | *aux_val |= val; | |
411 | *aux_mask &= ~mask; | |
412 | } | |
413 | ||
414 | static void __init pl310_of_setup(const struct device_node *np, | |
415 | __u32 *aux_val, __u32 *aux_mask) | |
416 | { | |
417 | u32 data[3] = { 0, 0, 0 }; | |
418 | u32 tag[3] = { 0, 0, 0 }; | |
419 | u32 filter[2] = { 0, 0 }; | |
420 | ||
421 | of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag)); | |
422 | if (tag[0] && tag[1] && tag[2]) | |
423 | writel_relaxed( | |
424 | ((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) | | |
425 | ((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) | | |
426 | ((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT), | |
427 | l2x0_base + L2X0_TAG_LATENCY_CTRL); | |
428 | ||
429 | of_property_read_u32_array(np, "arm,data-latency", | |
430 | data, ARRAY_SIZE(data)); | |
431 | if (data[0] && data[1] && data[2]) | |
432 | writel_relaxed( | |
433 | ((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) | | |
434 | ((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) | | |
435 | ((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT), | |
436 | l2x0_base + L2X0_DATA_LATENCY_CTRL); | |
437 | ||
438 | of_property_read_u32_array(np, "arm,filter-ranges", | |
439 | filter, ARRAY_SIZE(filter)); | |
74d41f39 | 440 | if (filter[1]) { |
8c369264 RH |
441 | writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M), |
442 | l2x0_base + L2X0_ADDR_FILTER_END); | |
443 | writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN, | |
444 | l2x0_base + L2X0_ADDR_FILTER_START); | |
445 | } | |
446 | } | |
447 | ||
448 | static const struct of_device_id l2x0_ids[] __initconst = { | |
449 | { .compatible = "arm,pl310-cache", .data = pl310_of_setup }, | |
450 | { .compatible = "arm,l220-cache", .data = l2x0_of_setup }, | |
451 | { .compatible = "arm,l210-cache", .data = l2x0_of_setup }, | |
452 | {} | |
453 | }; | |
454 | ||
455 | int __init l2x0_of_init(__u32 aux_val, __u32 aux_mask) | |
456 | { | |
457 | struct device_node *np; | |
458 | void (*l2_setup)(const struct device_node *np, | |
459 | __u32 *aux_val, __u32 *aux_mask); | |
460 | ||
461 | np = of_find_matching_node(NULL, l2x0_ids); | |
462 | if (!np) | |
463 | return -ENODEV; | |
464 | l2x0_base = of_iomap(np, 0); | |
465 | if (!l2x0_base) | |
466 | return -ENOMEM; | |
467 | ||
468 | /* L2 configuration can only be changed if the cache is disabled */ | |
469 | if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) { | |
470 | l2_setup = of_match_node(l2x0_ids, np)->data; | |
471 | if (l2_setup) | |
472 | l2_setup(np, &aux_val, &aux_mask); | |
473 | } | |
474 | l2x0_init(l2x0_base, aux_val, aux_mask); | |
475 | return 0; | |
476 | } | |
477 | #endif |