/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-aurora-l2.h"
#define CACHE_LINE_SIZE		32
static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static u32 l2x0_cache_id;
static unsigned int l2x0_sets;
static unsigned int l2x0_ways;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

/*
 * Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree.
 */
static u32 cache_id_part_number_from_dt;

struct l2x0_regs l2x0_saved_regs;
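
/*
 * Per-variant hooks used by the device-tree probe path below: 'setup'
 * adjusts the auxiliary control value, 'save' captures registers for
 * resume, and 'outer_cache' is copied into the global outer_cache in
 * l2x0_of_init().
 */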
struct l2x0_of_data {
	void (*setup)(const struct device_node *, u32 *, u32 *);
	void (*save)(void);
	struct outer_cache_fns outer_cache;
};
static bool of_init = false;
static inline bool is_pl310_rev(int rev)
{
	return (l2x0_cache_id &
		(L2X0_CACHE_ID_PART_MASK | L2X0_CACHE_ID_REV_MASK)) ==
			(L2X0_CACHE_ID_PART_L310 | rev);
}
static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}
#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	cache_wait_way
#endif
static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}
static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}
static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug)
		outer_cache.set_debug(val);
}

static void pl310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define pl310_set_debug	NULL
#endif
#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif
static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
#ifdef CONFIG_PL310_ERRATA_727915
static void l2x0_for_each_set_way(void __iomem *reg)
{
	int set;
	int way;
	unsigned long flags;

	for (way = 0; way < l2x0_ways; way++) {
		raw_spin_lock_irqsave(&l2x0_lock, flags);
		for (set = 0; set < l2x0_sets; set++)
			writel_relaxed((way << 28) | (set << 5), reg);
		cache_sync();
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}
}
#endif
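
/*
 * The by-index register format written in l2x0_for_each_set_way() above
 * encodes the way number in bits [31:28] and the set index starting at
 * bit 5, i.e. log2(CACHE_LINE_SIZE).  For example, way 2, set 10 gives
 * (2 << 28) | (10 << 5) = 0x20000140.
 */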
static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);
}
static void l2x0_flush_all(void)
{
	unsigned long flags;

#ifdef CONFIG_PL310_ERRATA_727915
	if (is_pl310_rev(REV_PL310_R2P0)) {
		l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_INV_LINE_IDX);
		return;
	}
#endif

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_clean_all(void)
{
	unsigned long flags;

#ifdef CONFIG_PL310_ERRATA_727915
	if (is_pl310_rev(REV_PL310_R2P0)) {
		l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_LINE_IDX);
		return;
	}
#endif

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	debug_writel(0x03);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a no-no */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
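
/*
 * The range operations below work through the range in blocks of at
 * most 4096 bytes, dropping and re-taking l2x0_lock between blocks so
 * that interrupts are not held off for the duration of a large range.
 */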
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2x0_unlock(u32 cache_id)
{
	int lockregs;
	int i;

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		lockregs = 8;
		break;
	case AURORA_CACHE_ID:
		lockregs = 4;
		break;
	default:
		/* L210 and unknown types */
		lockregs = 1;
		break;
	}

	for (i = 0; i < lockregs; i++) {
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	u32 aux;
	u32 way_size = 0;
	int way_size_shift = L2X0_WAY_SIZE_SHIFT;
	const char *type;

	l2x0_base = base;
	if (cache_id_part_number_from_dt)
		l2x0_cache_id = cache_id_part_number_from_dt;
	else
		l2x0_cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (l2x0_cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			l2x0_ways = 16;
		else
			l2x0_ways = 8;
		type = "L310";
#ifdef CONFIG_PL310_ERRATA_753970
		/* Unmapped register. */
		sync_reg_offset = L2X0_DUMMY_REG;
#endif
		if ((l2x0_cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0)
			outer_cache.set_debug = pl310_set_debug;
		break;
	case L2X0_CACHE_ID_PART_L210:
		l2x0_ways = (aux >> 13) & 0xf;
		type = "L210";
		break;
	case AURORA_CACHE_ID:
		sync_reg_offset = AURORA_SYNC_REG;
		l2x0_ways = (aux >> 13) & 0xf;
		l2x0_ways = 2 << ((l2x0_ways + 1) >> 2);
		way_size_shift = AURORA_WAY_SIZE_SHIFT;
		type = "Aurora";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		l2x0_ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << l2x0_ways) - 1;
	/*
	 * L2 cache Size = Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = SZ_1K << (way_size + way_size_shift);

	l2x0_size = l2x0_ways * way_size;
	l2x0_sets = way_size / CACHE_LINE_SIZE;
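
	/*
	 * Example, assuming the default L2X0_WAY_SIZE_SHIFT of 3: a PL310
	 * with a way-size field of 3 gives way_size = 1KB << (3 + 3) = 64KB;
	 * with 16 ways, l2x0_size = 1MB and l2x0_sets = 64KB / 32 = 2048.
	 */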
	/*
	 * Check if l2x0 controller is already enabled.
	 * If you are booting from non-secure mode
	 * accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* Make sure that I&D is not locked down when starting */
		l2x0_unlock(l2x0_cache_id);

		/* l2x0 controller is disabled */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_inv_all();

		/* enable L2X0 */
		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
	}
	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	/* Save the value for resuming. */
	l2x0_saved_regs.aux_ctrl = aux;
	if (!of_init) {
		outer_cache.inv_range = l2x0_inv_range;
		outer_cache.clean_range = l2x0_clean_range;
		outer_cache.flush_range = l2x0_flush_range;
		outer_cache.sync = l2x0_cache_sync;
		outer_cache.flush_all = l2x0_flush_all;
		outer_cache.inv_all = l2x0_inv_all;
		outer_cache.disable = l2x0_disable;
	}
	printk(KERN_INFO "%s cache controller enabled\n", type);
	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
			l2x0_ways, l2x0_cache_id, aux, l2x0_size);
}
#ifdef CONFIG_OF
static int l2_wt_override;
/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start + 1))
		end = PAGE_ALIGN(start + 1);

	return end;
}
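
/*
 * For example, with 4KB pages, calc_range_end(0x10000020, 0x10008000)
 * can return at most 0x10001000: the result never crosses the page
 * boundary above 'start', whatever MAX_RANGE_SIZE allows.
 */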
/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}
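
/*
 * Note that the callers below pass 'range_end - CACHE_LINE_SIZE' as
 * 'end': the hardware range registers take an inclusive end address,
 * while calc_range_end() returns a noninclusive one.
 */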
static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * round start and end addresses up to cache line size
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);

		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}
static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);

			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}
static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);

		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}
static void __init l2x0_of_setup(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}
static void __init pl310_of_setup(const struct device_node *np,
				  u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}
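
/*
 * Illustrative device-tree fragment consumed by pl310_of_setup(); the
 * node name, address and latency values are placeholders, not
 * recommendations:
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfff12000 0x1000>;
 *		arm,tag-latency = <1 1 1>;
 *		arm,data-latency = <2 2 2>;
 *		arm,filter-ranges = <0x80000000 0x10000000>;
 *	};
 */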
static void __init pl310_save(void)
{
	u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
		L2X0_CACHE_ID_RTL_MASK;

	l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_START);

	if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
		/*
		 * From r2p0, there is Prefetch offset/control register
		 */
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
			L2X0_PREFETCH_CTRL);
		/*
		 * From r3p0, there is Power control register
		 */
		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
			l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
				L2X0_POWER_CTRL);
	}
}
static void aurora_save(void)
{
	l2x0_saved_regs.ctrl = readl_relaxed(l2x0_base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
}
static void l2x0_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore aux ctrl and enable l2 */
		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));

		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
			L2X0_AUX_CTRL);

		l2x0_inv_all();

		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
	}
}
static void pl310_resume(void)
{
	u32 l2x0_revision;

	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			l2x0_base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			l2x0_base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			l2x0_base + L2X0_ADDR_FILTER_START);

		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				l2x0_base + L2X0_PREFETCH_CTRL);
			if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
					l2x0_base + L2X0_POWER_CTRL);
		}
	}

	l2x0_resume();
}
static void aurora_resume(void)
{
	if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl,
				l2x0_base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL);
	}
}
static void __init aurora_broadcast_l2_commands(void)
{
	__u32 u;

	/* Enable Broadcasting of cache commands to L2 */
	__asm__ __volatile__("mrc p15, 1, %0, c15, c2, 0" : "=r"(u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	__asm__ __volatile__("mcr p15, 1, %0, c15, c2, 0\n" : : "r"(u));

	isb();
}
static void __init aurora_of_setup(const struct device_node *np,
				u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			&cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}
static const struct l2x0_of_data pl310_data = {
	.setup = pl310_of_setup,
	.save  = pl310_save,
	.outer_cache = {
		.resume      = pl310_resume,
		.inv_range   = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.sync        = l2x0_cache_sync,
		.flush_all   = l2x0_flush_all,
		.inv_all     = l2x0_inv_all,
		.disable     = l2x0_disable,
	},
};
static const struct l2x0_of_data l2x0_data = {
	.setup = l2x0_of_setup,
	.save  = NULL,
	.outer_cache = {
		.resume      = l2x0_resume,
		.inv_range   = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.sync        = l2x0_cache_sync,
		.flush_all   = l2x0_flush_all,
		.inv_all     = l2x0_inv_all,
		.disable     = l2x0_disable,
	},
};
static const struct l2x0_of_data aurora_with_outer_data = {
	.setup = aurora_of_setup,
	.save  = aurora_save,
	.outer_cache = {
		.resume      = aurora_resume,
		.inv_range   = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.sync        = l2x0_cache_sync,
		.flush_all   = l2x0_flush_all,
		.inv_all     = l2x0_inv_all,
		.disable     = l2x0_disable,
	},
};
static const struct l2x0_of_data aurora_no_outer_data = {
	.setup = aurora_of_setup,
	.save  = aurora_save,
	.outer_cache = {
		.resume      = aurora_resume,
	},
};
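
/*
 * In the "system cache" (no outer) configuration the CPU broadcasts its
 * own cache maintenance operations to the L2 in hardware (see
 * aurora_broadcast_l2_commands()), so only a resume hook is needed; the
 * "outer cache" variant above must drive the range operations
 * explicitly.
 */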
static const struct of_device_id l2x0_ids[] __initconst = {
	{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
	{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
	{ .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
	{ .compatible = "marvell,aurora-system-cache",
	  .data = (void *)&aurora_no_outer_data },
	{ .compatible = "marvell,aurora-outer-cache",
	  .data = (void *)&aurora_with_outer_data },
	{}
};
int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	struct device_node *np;
	const struct l2x0_of_data *data;
	struct resource res;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		if (data->setup)
			data->setup(np, &aux_val, &aux_mask);

		/*
		 * For aurora cache in no outer mode, select the
		 * correct mode using the coprocessor.
		 */
		if (data == &aurora_no_outer_data)
			aurora_broadcast_l2_commands();
	}

	if (data->save)
		data->save();

	of_init = true;
	memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
	l2x0_init(l2x0_base, aux_val, aux_mask);

	return 0;
}
#endif