/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-aurora-l2.h"

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static u32 l2x0_cache_id;
static unsigned int l2x0_sets;
static unsigned int l2x0_ways;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

/* Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree */
static u32 cache_id_part_number_from_dt;

struct l2x0_regs l2x0_saved_regs;

struct l2x0_of_data {
	void (*setup)(const struct device_node *, u32 *, u32 *);
	void (*save)(void);
	struct outer_cache_fns outer_cache;
};

static bool of_init = false;

static inline bool is_pl310_rev(int rev)
{
	return (l2x0_cache_id &
		(L2X0_CACHE_ID_PART_MASK | L2X0_CACHE_ID_REV_MASK)) ==
			(L2X0_CACHE_ID_PART_L310 | rev);
}

static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	cache_wait_way
#endif

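/*
 * A write of any value to the controller's Cache Sync register drains
 * its buffers.  sync_reg_offset normally points at L2X0_CACHE_SYNC but
 * is redirected to L2X0_DUMMY_REG in l2x0_init() on L310 parts that
 * need the 753970 workaround.
 */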
static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

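/*
 * For the 588369/727915 errata the maintenance operations below are
 * bracketed by writes to the PL310 debug control register: 0x03 sets
 * the DCL and DWB bits (disable cache linefill and write-back) for the
 * duration of the operation, and 0x00 restores normal behaviour.
 * l2x0_init() only hooks up pl310_set_debug on r3p0 and earlier RTL.
 */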
#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug)
		outer_cache.set_debug(val);
}

static void pl310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define pl310_set_debug	NULL
#endif

#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

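/*
 * PL310 erratum 727915 workaround: on the r2p0 parts checked for below,
 * clean-all/flush-all avoid the background Clean/Invalidate by Way
 * operation and instead walk every set and way through the by-index
 * registers.  The value written is (way << 28) | (set << 5), i.e. the
 * way number in the top bits and the set index starting above the
 * 32-byte line offset (see the PL310 TRM for the exact field layout).
 */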
#ifdef CONFIG_PL310_ERRATA_727915
static void l2x0_for_each_set_way(void __iomem *reg)
{
	int set;
	int way;
	unsigned long flags;

	for (way = 0; way < l2x0_ways; way++) {
		raw_spin_lock_irqsave(&l2x0_lock, flags);
		for (set = 0; set < l2x0_sets; set++)
			writel_relaxed((way << 28) | (set << 5), reg);
		cache_sync();
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}
}
#endif

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

#ifdef CONFIG_PL310_ERRATA_727915
	if (is_pl310_rev(REV_PL310_R2P0)) {
		l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_INV_LINE_IDX);
		return;
	}
#endif

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

#ifdef CONFIG_PL310_ERRATA_727915
	if (is_pl310_rev(REV_PL310_R2P0)) {
		l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_LINE_IDX);
		return;
	}
#endif

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	debug_writel(0x03);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a no-no */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

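/*
 * Range operations: partially covered cache lines at either end of an
 * invalidate are cleaned and invalidated (flushed) rather than just
 * invalidated, so dirty data outside the requested range is not lost.
 * All three range functions work through the range in blocks of at most
 * 4096 bytes, dropping and retaking the spinlock between blocks so that
 * interrupts are not held off for the whole range.
 */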
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

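/*
 * Disabling flushes the whole cache and clears the enable bit with the
 * lock held; the dsb() ensures the controller write has completed
 * before the function returns.
 */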
static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_unlock(u32 cache_id)
{
	int lockregs;
	int i;

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		lockregs = 8;
		break;
	case AURORA_CACHE_ID:
		lockregs = 4;
		break;
	default:
		/* L210 and unknown types */
		lockregs = 1;
		break;
	}

	for (i = 0; i < lockregs; i++) {
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

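/*
 * aux_mask selects which bits of the hardware AUX_CTRL value to keep
 * and aux_val supplies bits to set on top of that, i.e.
 * aux = (AUX_CTRL & aux_mask) | aux_val.  The result is only written
 * back to the controller if it is still disabled at this point.
 */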
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	u32 aux;
	u32 way_size = 0;
	int way_size_shift = L2X0_WAY_SIZE_SHIFT;
	const char *type;

	l2x0_base = base;
	if (cache_id_part_number_from_dt)
		l2x0_cache_id = cache_id_part_number_from_dt;
	else
		l2x0_cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (l2x0_cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			l2x0_ways = 16;
		else
			l2x0_ways = 8;
		type = "L310";
#ifdef CONFIG_PL310_ERRATA_753970
		/* Unmapped register. */
		sync_reg_offset = L2X0_DUMMY_REG;
#endif
		if ((l2x0_cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0)
			outer_cache.set_debug = pl310_set_debug;
		break;
	case L2X0_CACHE_ID_PART_L210:
		l2x0_ways = (aux >> 13) & 0xf;
		type = "L210";
		break;

	case AURORA_CACHE_ID:
		sync_reg_offset = AURORA_SYNC_REG;
		l2x0_ways = (aux >> 13) & 0xf;
		l2x0_ways = 2 << ((l2x0_ways + 1) >> 2);
		way_size_shift = AURORA_WAY_SIZE_SHIFT;
		type = "Aurora";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		l2x0_ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << l2x0_ways) - 1;

	/*
	 * L2 cache Size =  Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = SZ_1K << (way_size + way_size_shift);
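	/*
	 * Worked example (illustrative): a way-size field of 3 with the
	 * default shift of 3 (L2X0_WAY_SIZE_SHIFT) gives SZ_1K << 6 =
	 * 64 KiB per way; an 8-way cache then has l2x0_size = 512 KiB
	 * and 64 KiB / 32 = 2048 sets per way.
	 */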

	l2x0_size = l2x0_ways * way_size;
	l2x0_sets = way_size / CACHE_LINE_SIZE;

	/*
	 * Check if l2x0 controller is already enabled.
	 * If you are booting from non-secure mode
	 * accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* Make sure that I&D is not locked down when starting */
		l2x0_unlock(l2x0_cache_id);

		/* l2x0 controller is disabled */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_inv_all();

		/* enable L2X0 */
		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
	}

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	/* Save the value for resuming. */
	l2x0_saved_regs.aux_ctrl = aux;

	if (!of_init) {
		outer_cache.inv_range = l2x0_inv_range;
		outer_cache.clean_range = l2x0_clean_range;
		outer_cache.flush_range = l2x0_flush_range;
		outer_cache.sync = l2x0_cache_sync;
		outer_cache.flush_all = l2x0_flush_all;
		outer_cache.inv_all = l2x0_inv_all;
		outer_cache.disable = l2x0_disable;
	}

	printk(KERN_INFO "%s cache controller enabled\n", type);
	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
			l2x0_ways, l2x0_cache_id, aux, l2x0_size);
}

#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}

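/*
 * Example (assuming 4 KiB pages): for start = 0x10000fc0 and a large
 * requested end, calc_range_end() returns either start + MAX_RANGE_SIZE
 * or 0x10001000 (the next page boundary), whichever is smaller, so a
 * single hardware range operation never crosses a page.
 */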
/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * round start and end addresses to cache line boundaries
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}

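/*
 * Illustrative device-tree node consumed by the setup hooks below (the
 * unit address and latency values are hypothetical; the properties
 * follow the "arm,pl310-cache" binding):
 *
 *	L2: cache-controller@fffef000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfffef000 0x1000>;
 *		cache-unified;
 *		cache-level = <2>;
 *		arm,tag-latency = <2 2 2>;
 *		arm,data-latency = <3 2 2>;
 *	};
 */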
static void __init l2x0_of_setup(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static void __init pl310_of_setup(const struct device_node *np,
				  u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}

static void __init pl310_save(void)
{
	u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
		L2X0_CACHE_ID_RTL_MASK;

	l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_START);

	if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
		/*
		 * From r2p0, there is Prefetch offset/control register
		 */
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
			L2X0_PREFETCH_CTRL);
		/*
		 * From r3p0, there is Power control register
		 */
		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
			l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
				L2X0_POWER_CTRL);
	}
}

static void aurora_save(void)
{
	l2x0_saved_regs.ctrl = readl_relaxed(l2x0_base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
}

static void l2x0_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore aux ctrl and enable l2 */
		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));

		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
			L2X0_AUX_CTRL);

		l2x0_inv_all();

		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
	}
}

static void pl310_resume(void)
{
	u32 l2x0_revision;

	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			l2x0_base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			l2x0_base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			l2x0_base + L2X0_ADDR_FILTER_START);

		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				l2x0_base + L2X0_PREFETCH_CTRL);
			if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
					l2x0_base + L2X0_POWER_CTRL);
		}
	}

	l2x0_resume();
}

static void aurora_resume(void)
{
	if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl,
				l2x0_base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL);
	}
}

static void __init aurora_broadcast_l2_commands(void)
{
	__u32 u;
	/* Enable Broadcasting of cache commands to L2 */
	__asm__ __volatile__("mrc p15, 1, %0, c15, c2, 0" : "=r"(u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	__asm__ __volatile__("mcr p15, 1, %0, c15, c2, 0\n" : : "r"(u));
	isb();
}

static void __init aurora_of_setup(const struct device_node *np,
				u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			&cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

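/*
 * Each l2x0_of_data entry ties a compatible string to its device-tree
 * setup hook, an optional save hook used for suspend/resume, and the
 * outer_cache operations that l2x0_of_init() copies into the global
 * outer_cache structure.
 */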
static const struct l2x0_of_data pl310_data = {
	.setup = pl310_of_setup,
	.save = pl310_save,
	.outer_cache = {
		.resume = pl310_resume,
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.sync = l2x0_cache_sync,
		.flush_all = l2x0_flush_all,
		.inv_all = l2x0_inv_all,
		.disable = l2x0_disable,
	},
};

static const struct l2x0_of_data l2x0_data = {
	.setup = l2x0_of_setup,
	.save = NULL,
	.outer_cache = {
		.resume = l2x0_resume,
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.sync = l2x0_cache_sync,
		.flush_all = l2x0_flush_all,
		.inv_all = l2x0_inv_all,
		.disable = l2x0_disable,
	},
};

static const struct l2x0_of_data aurora_with_outer_data = {
	.setup = aurora_of_setup,
	.save = aurora_save,
	.outer_cache = {
		.resume = aurora_resume,
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.sync = l2x0_cache_sync,
		.flush_all = l2x0_flush_all,
		.inv_all = l2x0_inv_all,
		.disable = l2x0_disable,
	},
};

static const struct l2x0_of_data aurora_no_outer_data = {
	.setup = aurora_of_setup,
	.save = aurora_save,
	.outer_cache = {
		.resume = aurora_resume,
	},
};

static const struct of_device_id l2x0_ids[] __initconst = {
	{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
	{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
	{ .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
	{ .compatible = "marvell,aurora-system-cache",
	  .data = (void *)&aurora_no_outer_data},
	{ .compatible = "marvell,aurora-outer-cache",
	  .data = (void *)&aurora_with_outer_data},
	{}
};

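/*
 * DT probe path: find the first node matching l2x0_ids, map its "reg"
 * window, apply the per-variant setup (only while the cache is still
 * disabled), run the save hook, install the outer_cache operations and
 * finally hand over to l2x0_init().
 */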
int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	struct device_node *np;
	const struct l2x0_of_data *data;
	struct resource res;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		if (data->setup)
			data->setup(np, &aux_val, &aux_mask);

		/* For the Aurora cache in no-outer mode, select the
		 * correct mode using the coprocessor */
		if (data == &aurora_no_outer_data)
			aurora_broadcast_l2_commands();
	}

	if (data->save)
		data->save();

	of_init = true;
	memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
	l2x0_init(l2x0_base, aux_val, aux_mask);

	return 0;
}
#endif