/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-aurora-l2.h"

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
/*
 * Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree.
 */
static u32 cache_id_part_number_from_dt;
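
/*
 * A device-tree fragment supplying the part number might look like
 * this (a sketch only; the node name, register window and part-number
 * value are hypothetical):
 *
 *	l2: cache-controller@d0008000 {
 *		compatible = "marvell,aurora-outer-cache";
 *		reg = <0xd0008000 0x1000>;
 *		cache-id-part = <0x100>;
 *	};
 */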

struct l2x0_regs l2x0_saved_regs;

struct l2x0_of_data {
	void (*setup)(const struct device_node *, u32 *, u32 *);
	void (*save)(void);
	struct outer_cache_fns outer_cache;
};

static bool of_init = false;

static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	cache_wait_way
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}
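
/*
 * The sync above is issued through sync_reg_offset rather than a
 * hardcoded L2X0_CACHE_SYNC: l2x0_init() redirects it to L2X0_DUMMY_REG
 * on PL310 parts affected by erratum 753970, and to AURORA_SYNC_REG on
 * Aurora.
 */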

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug)
		outer_cache.set_debug(val);
}

static void pl310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define pl310_set_debug	NULL
#endif
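
/*
 * The errata workarounds below bracket line operations with
 * debug_writel(0x03) / debug_writel(0x00).  On the PL310, the value
 * 0x03 sets the two low bits of the debug control register, which
 * temporarily disable write-back behaviour and cache linefills (see
 * the PL310 TRM for the exact bit definitions).
 */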

#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

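/* Must be called with l2x0_lock held. */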
static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a no-no */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

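	/*
	 * Work through the range in blocks of at most 4 KiB, dropping
	 * and re-taking the lock between blocks so interrupts are not
	 * held off for the whole range.
	 */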
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_unlock(u32 cache_id)
{
	int lockregs;
	int i;

	switch (cache_id) {
	case L2X0_CACHE_ID_PART_L310:
		lockregs = 8;
		break;
	case AURORA_CACHE_ID:
		lockregs = 4;
		break;
	default:
		/* L210 and unknown types */
		lockregs = 1;
		break;
	}

	for (i = 0; i < lockregs; i++) {
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

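/**
 * l2x0_init - configure and enable an L2x0-class outer cache controller
 * @base:	virtual base address of the controller's registers
 * @aux_val:	bits to set in the auxiliary control register
 * @aux_mask:	mask of the register's reset value to preserve
 *
 * A board file typically calls this once at init time, along these
 * lines (the address and AUX values below are purely illustrative):
 *
 *	l2x0_init(IOMEM(0xfe012000), 0x00400000, 0xfe0fffff);
 */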
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	u32 aux;
	u32 cache_id;
	u32 way_size = 0;
	int ways;
	int way_size_shift = L2X0_WAY_SIZE_SHIFT;
	const char *type;

	l2x0_base = base;
	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID)
			& L2X0_CACHE_ID_PART_MASK;
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
#ifdef CONFIG_PL310_ERRATA_753970
		/* Unmapped register. */
		sync_reg_offset = L2X0_DUMMY_REG;
#endif
		if ((cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0)
			outer_cache.set_debug = pl310_set_debug;
		break;
	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;

	case AURORA_CACHE_ID:
		sync_reg_offset = AURORA_SYNC_REG;
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		way_size_shift = AURORA_WAY_SIZE_SHIFT;
		type = "Aurora";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache Size = Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + way_size_shift);

	l2x0_size = ways * way_size * SZ_1K;

	/*
	 * Check if the l2x0 controller is already enabled.  If we are
	 * booting in non-secure mode, accessing the registers below
	 * will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* Make sure that I&D is not locked down when starting */
		l2x0_unlock(cache_id);

		/* l2x0 controller is disabled */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_inv_all();

		/* enable L2X0 */
		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
	}

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	/* Save the value for resuming. */
	l2x0_saved_regs.aux_ctrl = aux;

	if (!of_init) {
		outer_cache.inv_range = l2x0_inv_range;
		outer_cache.clean_range = l2x0_clean_range;
		outer_cache.flush_range = l2x0_flush_range;
		outer_cache.sync = l2x0_cache_sync;
		outer_cache.flush_all = l2x0_flush_all;
		outer_cache.inv_all = l2x0_inv_all;
		outer_cache.disable = l2x0_disable;
	}

	printk(KERN_INFO "%s cache controller enabled\n", type);
	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
	       ways, cache_id, aux, l2x0_size);
}

#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}
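
/*
 * Worked example, assuming 4 KiB pages and a MAX_RANGE_SIZE (from
 * cache-aurora-l2.h) of at least 0x80: calc_range_end(0xff80, 0x10100)
 * returns 0x10000, because the page-boundary clamp wins; the caller
 * then loops again from 0x10000 for the remaining 0x100 bytes.
 */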

/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * Round start down and end up to cache line boundaries.
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}

static void __init l2x0_of_setup(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}
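
/*
 * An L210/L220 node supplying these latencies might look like this
 * (register address and cycle counts are illustrative only):
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,l220-cache";
 *		reg = <0xfff12000 0x1000>;
 *		arm,tag-latency = <1>;
 *		arm,data-latency = <1 1>;
 *		arm,dirty-latency = <1>;
 *	};
 */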

static void __init pl310_of_setup(const struct device_node *np,
				  u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}
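
/*
 * For a PL310 the latency properties carry three cells each (read,
 * write, setup, per the shifts above), and "arm,filter-ranges" takes
 * <base size>.  A sketch with purely illustrative values:
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfff12000 0x1000>;
 *		arm,tag-latency = <1 1 1>;
 *		arm,data-latency = <2 2 1>;
 *		arm,filter-ranges = <0x80000000 0x8000000>;
 *	};
 */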

static void __init pl310_save(void)
{
	u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
		L2X0_CACHE_ID_RTL_MASK;

	l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_START);

	if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
		/*
		 * From r2p0, there is Prefetch offset/control register
		 */
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
			L2X0_PREFETCH_CTRL);
		/*
		 * From r3p0, there is Power control register
		 */
		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
			l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
				L2X0_POWER_CTRL);
	}
}

static void aurora_save(void)
{
	l2x0_saved_regs.ctrl = readl_relaxed(l2x0_base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
}

static void l2x0_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore aux ctrl and enable l2 */
		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));

		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
			L2X0_AUX_CTRL);

		l2x0_inv_all();

		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
	}
}

static void pl310_resume(void)
{
	u32 l2x0_revision;

	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			l2x0_base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			l2x0_base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			l2x0_base + L2X0_ADDR_FILTER_START);

		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				l2x0_base + L2X0_PREFETCH_CTRL);
			if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
					l2x0_base + L2X0_POWER_CTRL);
		}
	}

	l2x0_resume();
}

static void aurora_resume(void)
{
	if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl,
			l2x0_base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL);
	}
}

static void __init aurora_broadcast_l2_commands(void)
{
	__u32 u;
	/* Enable broadcasting of cache commands to L2 */
	__asm__ __volatile__("mrc p15, 1, %0, c15, c2, 0" : "=r"(u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	__asm__ __volatile__("mcr p15, 1, %0, c15, c2, 0\n" : : "r"(u));
	isb();
}

static void __init aurora_of_setup(const struct device_node *np,
				   u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			     &cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2x0_of_data pl310_data = {
	.setup = pl310_of_setup,
	.save = pl310_save,
	.outer_cache = {
		.resume = pl310_resume,
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.sync = l2x0_cache_sync,
		.flush_all = l2x0_flush_all,
		.inv_all = l2x0_inv_all,
		.disable = l2x0_disable,
		.set_debug = pl310_set_debug,
	},
};

static const struct l2x0_of_data l2x0_data = {
	.setup = l2x0_of_setup,
	.save = NULL,
	.outer_cache = {
		.resume = l2x0_resume,
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.sync = l2x0_cache_sync,
		.flush_all = l2x0_flush_all,
		.inv_all = l2x0_inv_all,
		.disable = l2x0_disable,
	},
};

static const struct l2x0_of_data aurora_with_outer_data = {
	.setup = aurora_of_setup,
	.save = aurora_save,
	.outer_cache = {
		.resume = aurora_resume,
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.sync = l2x0_cache_sync,
		.flush_all = l2x0_flush_all,
		.inv_all = l2x0_inv_all,
		.disable = l2x0_disable,
	},
};

static const struct l2x0_of_data aurora_no_outer_data = {
	.setup = aurora_of_setup,
	.save = aurora_save,
	.outer_cache = {
		.resume = aurora_resume,
	},
};

static const struct of_device_id l2x0_ids[] __initconst = {
	{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
	{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
	{ .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
	{ .compatible = "marvell,aurora-system-cache",
	  .data = (void *)&aurora_no_outer_data},
	{ .compatible = "marvell,aurora-outer-cache",
	  .data = (void *)&aurora_with_outer_data},
	{}
};

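/*
 * Platform code typically calls this from its machine init path once
 * the device tree is available; a common pattern that keeps the
 * hardware's reset AUX_CTRL value is:
 *
 *	l2x0_of_init(0, ~0);
 */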
int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	struct device_node *np;
	const struct l2x0_of_data *data;
	struct resource res;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		if (data->setup)
			data->setup(np, &aux_val, &aux_mask);

		/*
		 * For the Aurora cache in no-outer-cache mode, select
		 * the correct mode using the coprocessor.
		 */
		if (data == &aurora_no_outer_data)
			aurora_broadcast_l2_commands();
	}

	if (data->save)
		data->save();

	of_init = true;
	l2x0_init(l2x0_base, aux_val, aux_mask);

	memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));

	return 0;
}
#endif