1 #include <linux/linkage.h>
2 #include <linux/init.h>
3 #include <asm/assembler.h>
// SCTLR_EL1 cache-control bits: C (bit 2) enables the data/unified
// caches, I (bit 12) enables the instruction cache.
6 .equ SCTLR_C_BIT, 0x00000004
7 .equ SCTLR_I_BIT, 0x00001000
// Width in bits of one per-level cache-type field in CLIDR_EL1.
// NOTE(review): the visible ubfx in __inner_dcache_all uses a literal
// width of #0x7 instead of this constant — confirm which is intended.
12 .equ CLIDR_FIELD_WIDTH, 3
// Enable the instruction cache: set the SCTLR I bit in x0.
// Clobbers x0. (The sysreg read/write around this orr is in lines not
// shown in this excerpt — presumably mrs/msr of sctlr_el1; confirm.)
15 ENTRY(__enable_icache)
17 orr x0, x0, #SCTLR_I_BIT // x0 |= I bit (icache enable)
20 ENDPROC(__enable_icache)
// Disable the instruction cache: clear the SCTLR I bit in x0.
// Clobbers x0. (Surrounding sysreg access lines not shown here.)
22 ENTRY(__disable_icache)
24 bic x0, x0, #SCTLR_I_BIT // x0 &= ~I bit (icache disable)
27 ENDPROC(__disable_icache)
// Fragment (its enclosing ENTRY is not visible in this excerpt):
// clears the SCTLR C bit to disable the data/unified caches.
// As the original note says, x0 is clobbered.
29 /* might pollute x0 */
32 bic x0, x0, #SCTLR_C_BIT // x0 &= ~C bit (dcache disable)
// Enable the data/unified caches: set the SCTLR C bit in x0.
// Clobbers x0. (Surrounding sysreg access lines not shown here.)
39 ENTRY(__enable_dcache)
41 orr x0, x0, #SCTLR_C_BIT // x0 |= C bit (dcache enable)
47 ENDPROC(__enable_dcache)
// Disable the data/unified caches: clear the SCTLR C bit in x0.
// Clobbers x0. (Surrounding sysreg access lines not shown here.)
49 ENTRY(__disable_dcache)
51 bic x0, x0, #SCTLR_C_BIT // x0 &= ~C bit (dcache disable)
57 ENDPROC(__disable_dcache)
// __enable_cache body (its ENTRY line is not visible in this excerpt):
// sets both I and C bits to enable instruction and data caches together.
// Clobbers x0.
61 orr x0, x0, #SCTLR_I_BIT // x0 |= I bit (icache enable)
62 orr x0, x0, #SCTLR_C_BIT // x0 |= C bit (dcache enable)
68 ENDPROC(__enable_cache)
// Disable both caches: clear the SCTLR I and C bits in x0.
// Clobbers x0. (Surrounding sysreg access lines not shown here.)
70 ENTRY(__disable_cache)
72 bic x0, x0, #SCTLR_I_BIT // x0 &= ~I bit (icache disable)
73 bic x0, x0, #SCTLR_C_BIT // x0 &= ~C bit (dcache disable)
79 ENDPROC(__disable_cache)
81 /* ---------------------------------------------------------------
82 * Data cache operations by set/way to the level specified
84 * The main function, do_dcsw_op requires:
85 * x0: The operation type (0-2), as defined in arch.h
86 * x1: The first cache level to operate on
87 * x3: The last cache level to operate on
89 * and will carry out the operation on each data cache from the level
90 * in x1 to the level in x3 in sequence
92 * The dcsw_op macro sets up the x3 and x9 parameters based on
93 * clidr_el1 cache information before invoking the main function
94 * ---------------------------------------------------------------
// do_dcsw_op (fragment — its label and the loop labels fall on lines
// not shown in this excerpt): generic data-cache maintenance by
// set/way. x0 indexes the DC-op-specific inner loop in dcsw_loop_table;
// x10 carries (cache level << 1) for CSSELR. w8 is used as a shift
// operand below and is presumably preloaded with 1 in unseen setup
// code — TODO confirm.
102 adr x14, dcsw_loop_table // compute inner loop address
103 add x14, x14, x0, lsl #5 // inner loop is 8x32-bit instructions
// Per-level header: CLIDR holds a 3-bit type field per level; skip the
// level unless it has a data or unified cache (type >= 2).
107 add x2, x10, x10, lsr #1 // work out 3x current cache level
108 lsr x1, x0, x2 // extract cache type bits from clidr
109 and x1, x1, #7 // mask the bits for current cache only
110 cmp x1, #2 // see what cache we have at this level
111 b.lt level_done // nothing to do if no cache or icache
// Select the level in CSSELR, then read its geometry from CCSIDR:
// line-size, max way and max set numbers, aligned for the DC operand.
113 msr csselr_el1, x10 // select current cache level in csselr
114 isb // isb to sych the new cssr&csidr
115 mrs x1, ccsidr_el1 // read the new ccsidr
116 and x2, x1, #7 // extract the length of the cache lines
117 add x2, x2, #4 // add 4 (line length offset)
118 ubfx x4, x1, #3, #10 // maximum way number
119 clz w5, w4 // bit position of way size increment
120 lsl w9, w4, w5 // w9 = aligned max way number
121 lsl w16, w8, w5 // w16 = way number loop decrement
122 orr w9, w10, w9 // w9 = combine way and cache number
123 ubfx w6, w1, #13, #15 // w6 = max set number
124 lsl w17, w8, w2 // w17 = set number loop decrement
125 dsb sy // barrier before we start this level
126 br x14 // jump to DC operation specific loop
// Way/set iteration: count ways and sets down to zero, building the
// set/way operand in w11. (The loop labels sit on lines not shown.)
130 lsl w7, w6, w2 // w7 = aligned max set number
133 orr w11, w9, w7 // combine cache, way and set number
135 subs w7, w7, w17 // decrement set number
138 subs x9, x9, x16 // decrement way number
145 add x10, x10, #2 // increment cache number
// All levels done: restore CSSELR and drain outstanding maintenance.
148 msr csselr_el1, xzr // select cache level 0 in csselr
149 dsb sy // barrier to complete final cache operation
// Run the set/way operation \mode (e.g. #DCCISW/#DCCSW/#DCISW) over all
// cache levels up to the Level of Coherency read from CLIDR (in x9).
160 .macro __inner_dcache_all mode
// CLIDR_EL1.LoC is the 3-bit field at bits [26:24].
// NOTE(review): the width operand #0x7 extracts 7 bits, which also
// pulls in the LoUU/ICB bits above LoC; CLIDR_FIELD_WIDTH (3) looks
// like the intended width — confirm against the ARM ARM.
164 ubfx x3, x9, #24, #0x7 /* LOC as last cache level */
// Level-restricted variants (bodies fall on lines not shown in this
// excerpt): apply set/way operation \mode to L1 only, L2 only, or to
// both L1 and L2 respectively.
168 .macro __inner_dcache_L1 mode
176 .macro __inner_dcache_L2 mode
184 .macro __inner_dcache_L1_L2 mode
// Flush (clean + invalidate, DC CISW) every data-cache level up to LoC.
192 ENTRY(__inner_flush_dcache_all)
193 __inner_dcache_all #DCCISW
194 ENDPROC(__inner_flush_dcache_all)
// Flush (clean + invalidate, DC CISW) the L1 data cache by set/way.
196 ENTRY(__inner_flush_dcache_L1)
197 __inner_dcache_L1 #DCCISW
198 ENDPROC(__inner_flush_dcache_L1)
// Flush (clean + invalidate, DC CISW) the L2 cache by set/way.
200 ENTRY(__inner_flush_dcache_L2)
201 __inner_dcache_L2 #DCCISW
202 ENDPROC(__inner_flush_dcache_L2)
// Clean (no invalidate, DC CSW) every data-cache level up to LoC.
204 ENTRY(__inner_clean_dcache_all)
205 __inner_dcache_all #DCCSW
206 ENDPROC(__inner_clean_dcache_all)
// Clean (no invalidate, DC CSW) the L1 data cache by set/way.
208 ENTRY(__inner_clean_dcache_L1)
209 __inner_dcache_L1 #DCCSW
210 ENDPROC(__inner_clean_dcache_L1)
// Clean (no invalidate, DC CSW) the L2 cache by set/way.
212 ENTRY(__inner_clean_dcache_L2)
213 __inner_dcache_L2 #DCCSW
214 ENDPROC(__inner_clean_dcache_L2)
// Invalidate (no clean, DC ISW) every data-cache level up to LoC.
216 ENTRY(__inner_inv_dcache_all)
217 __inner_dcache_all #DCISW
218 ENDPROC(__inner_inv_dcache_all)
// Invalidate (no clean, DC ISW) the L1 data cache by set/way.
220 ENTRY(__inner_inv_dcache_L1)
221 __inner_dcache_L1 #DCISW
// Fix: ENDPROC must name the matching ENTRY. It previously closed
// __inner_clean_dcache_L1 (copy-paste error), which mis-assigns the
// symbol type/size annotations for both functions.
222 ENDPROC(__inner_inv_dcache_L1)
// Invalidate (no clean, DC ISW) the L2 cache by set/way.
224 ENTRY(__inner_inv_dcache_L2)
225 __inner_dcache_L2 #DCISW
// Fix: ENDPROC must name the matching ENTRY. It previously closed
// __inner_clean_dcache_L2 (copy-paste error), which mis-assigns the
// symbol type/size annotations for both functions.
226 ENDPROC(__inner_inv_dcache_L2)
// Disable the data cache, then flush (clean + invalidate) L1 by
// set/way. (The dcache-disable step sits on a line not shown in this
// excerpt — presumably a call into __disable_dcache; confirm.)
228 ENTRY(__disable_dcache__inner_flush_dcache_L1)
230 __inner_dcache_L1 #DCCISW
231 ENDPROC(__disable_dcache__inner_flush_dcache_L1)
// Disable the data cache, then flush (clean + invalidate) both L1 and
// L2 by set/way. (The dcache-disable step sits on a line not shown in
// this excerpt.)
233 ENTRY(__disable_dcache__inner_flush_dcache_L1__inner_flush_dcache_L2)
235 __inner_dcache_L1_L2 #DCCISW
236 ENDPROC(__disable_dcache__inner_flush_dcache_L1__inner_flush_dcache_L2)
// Disable the data cache, then clean (no invalidate) both L1 and L2 by
// set/way. (The dcache-disable step sits on a line not shown in this
// excerpt.)
238 ENTRY(__disable_dcache__inner_clean_dcache_L1__inner_clean_dcache_L2)
240 __inner_dcache_L1_L2 #DCCSW
241 ENDPROC(__disable_dcache__inner_clean_dcache_L1__inner_clean_dcache_L2)
// Disable the data cache, flush (clean + invalidate) L1, then clean
// L2 by set/way. (The dcache-disable step and the x12/x13 save/restore
// sit on lines not shown in this excerpt.)
243 ENTRY(__disable_dcache__inner_flush_dcache_L1__inner_clean_dcache_L2)
245 /* Since L1 and L2 need different operations here, and the current
246 implementation returns from do_dcsw_op directly to the caller of the
247 last bl, we must construct a stack frame ourselves.
248 Two caller-saved registers, x12 & x13, hold lr & sp so that no
249 memory access happens while the cache operation is in progress.
250 NOTICE: no macro or function used here may corrupt x12 & x13.
255 bl __inner_flush_dcache_L1
258 __inner_dcache_L2 #DCCSW
259 ENDPROC(__disable_dcache__inner_flush_dcache_L1__inner_clean_dcache_L2)