/*
 * (imported from: "ARM: pm: add generic CPU suspend/resume support")
 * [GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / arm / mm / proc-mohawk.S
 */
1 /*
2 * linux/arch/arm/mm/proc-mohawk.S: MMU functions for Marvell PJ1 core
3 *
4 * PJ1 (codename Mohawk) is a hybrid of the xscale3 and Marvell's own core.
5 *
6 * Heavily based on proc-arm926.S and proc-xsc3.S
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23 #include <linux/linkage.h>
24 #include <linux/init.h>
25 #include <asm/assembler.h>
26 #include <asm/hwcap.h>
27 #include <asm/pgtable-hwdef.h>
28 #include <asm/pgtable.h>
29 #include <asm/page.h>
30 #include <asm/ptrace.h>
31 #include "proc-macros.S"
32
33 /*
34 * This is the maximum size of an area which will be flushed. If the
35 * area is larger than this, then we flush the whole cache.
36 */
37 #define CACHE_DLIMIT 32768
38
39 /*
40 * The cache line size of the L1 D cache.
41 */
42 #define CACHE_DLINESIZE 32
43
/*
 * cpu_mohawk_proc_init()
 *
 * Per-CPU initialisation hook: Mohawk needs no extra setup here,
 * so this is a plain return.
 */
ENTRY(cpu_mohawk_proc_init)
	mov	pc, lr				@ nothing to do
49
/*
 * cpu_mohawk_proc_fin()
 *
 * Shut the caches down prior to reset/shutdown: clear the I-cache,
 * D-cache and alignment-check enable bits in the control register.
 * Clobbers r0.
 */
ENTRY(cpu_mohawk_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ read ctrl register
	bic	r0, r0, #0x1800			@ ...iz........... (I-cache, branch target)
	bic	r0, r0, #0x0006			@ .............ca. (D-cache, alignment)
	mcr	p15, 0, r0, c1, c0, 0		@ write back: caches disabled
	mov	pc, lr
59
/*
 * cpu_mohawk_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: r0 = location to jump to for soft reset
 *
 * Caches and TLBs are invalidated and the MMU is turned off before
 * jumping, so "loc" must be valid as a physical/identity-mapped
 * address.  (same as arm926)
 */
	.align	5
ENTRY(cpu_mohawk_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mrc	p15, 0, ip, c1, c0, 0		@ read ctrl register
	bic	ip, ip, #0x0007			@ .............cam (clear D-cache, align, MMU)
	bic	ip, ip, #0x1100			@ ...i...s........ (clear I-cache, system bit)
	mcr	p15, 0, ip, c1, c0, 0		@ write ctrl: MMU now off
	mov	pc, r0				@ branch to loc, translation disabled
82
/*
 * cpu_mohawk_do_idle()
 *
 * Drop the core into its low-power wait-for-interrupt state.
 * Called with IRQs disabled; the WFI mcr returns when an interrupt
 * is pending.  Clobbers r0.
 */
	.align	5
ENTRY(cpu_mohawk_do_idle)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer first
	mcr	p15, 0, r0, c7, c0, 4		@ wait for interrupt
	mov	pc, lr
94
/*
 * flush_user_cache_all()
 *
 * Clean and invalidate all cache entries in a particular
 * address space.  Mohawk cannot flush per-ASID, so this simply
 * falls through to the whole-cache flush below.
 */
ENTRY(mohawk_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 * Clobbers r2, ip.
 */
ENTRY(mohawk_flush_kern_cache_all)
	mov	r2, #VM_EXEC			@ force the VM_EXEC path so I-cache is flushed too
	mov	ip, #0
__flush_whole_cache:
	mcr	p15, 0, ip, c7, c14, 0		@ clean & invalidate all D cache
	tst	r2, #VM_EXEC			@ executable mapping?
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 0		@ drain write buffer
						@ NOTE(review): opc2=0 here, while the classic
						@ drain-WB encoding is c7,c10,4 — presumably a
						@ PJ1-specific encoding; confirm against the
						@ Mohawk core manual
	mov	pc, lr
118
/*
 * flush_user_cache_range(start, end, flags)
 *
 * Clean and invalidate a range of cache entries in the
 * specified address range.
 *
 * - start - r0: start address (inclusive)
 * - end   - r1: end address (exclusive)
 * - flags - r2: vm_flags describing address space
 *
 * Ranges larger than CACHE_DLIMIT are cheaper to handle by flushing
 * the whole cache, so we branch to __flush_whole_cache above.
 * The loop is unrolled twice per iteration.  (same as arm926)
 */
ENTRY(mohawk_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT		@ big range? whole-cache flush is cheaper
	bgt	__flush_whole_cache
1:	tst	r2, #VM_EXEC			@ condition flags survive the mcrs below
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry (only if VM_EXEC)
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr
148
/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * Kernel and user variants are identical here; fall through.
 */
ENTRY(mohawk_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - r0: virtual start address
 * - end   - r1: virtual end address
 *
 * Cleans each D line to memory then invalidates the matching
 * I line, so freshly written code becomes fetchable.
 * (same as arm926)
 */
ENTRY(mohawk_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a cache line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
183
/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.
 *
 * - addr - r0: kernel address (assumed cache-line aligned by caller)
 * - size - r1: region size
 *
 * Clean+invalidate each D line in the area, then invalidate the
 * whole I cache and drain the write buffer.
 */
ENTRY(mohawk_flush_kern_dcache_area)
	add	r1, r0, r1			@ r1 = end address
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
203
/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back (cleaned) first so data outside the range is not lost.
 *
 * - start - r0: virtual start address
 * - end   - r1: virtual end address
 *
 * Local label (not ENTRY): reached only via mohawk_dma_map_area.
 * (same as v4wb)
 */
mohawk_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1	@ unaligned start?
	mcrne	p15, 0, r0, c7, c10, 1		@ clean boundary D entry
	tst	r1, #CACHE_DLINESIZE - 1	@ unaligned end?
	mcrne	p15, 0, r1, c7, c10, 1		@ clean boundary D entry
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
229
/*
 * dma_clean_range(start, end)
 *
 * Clean (write back, no invalidate) the specified virtual
 * address range, making memory visible to a DMA device.
 *
 * - start - r0: virtual start address
 * - end   - r1: virtual end address
 *
 * Local label (not ENTRY): reached only via mohawk_dma_map_area.
 * (same as v4wb)
 */
mohawk_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a cache line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
248
/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range,
 * used for bidirectional DMA mappings.
 *
 * - start - r0: virtual start address
 * - end   - r1: virtual end address
 */
ENTRY(mohawk_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a cache line
1:
	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
266
/*
 * dma_map_area(start, size, dir)
 * - start - r0: kernel virtual start address
 * - size  - r1: size of region
 * - dir   - r2: DMA direction
 *
 * Dispatch on direction after cmp r2, #DMA_TO_DEVICE:
 *   eq (dir == TO_DEVICE)    -> clean only
 *   cs (dir >  TO_DEVICE,
 *       i.e. FROM_DEVICE)    -> invalidate only
 *   else (BIDIRECTIONAL)     -> clean + invalidate
 * Tail-calls: the range routine returns directly to our caller.
 */
ENTRY(mohawk_dma_map_area)
	add	r1, r1, r0			@ r1 = end address
	cmp	r2, #DMA_TO_DEVICE
	beq	mohawk_dma_clean_range
	bcs	mohawk_dma_inv_range
	b	mohawk_dma_flush_range
ENDPROC(mohawk_dma_map_area)
280
/*
 * dma_unmap_area(start, size, dir)
 * - start - r0: kernel virtual start address
 * - size  - r1: size of region
 * - dir   - r2: DMA direction
 *
 * All maintenance was done at map time on this core, so unmap
 * is a no-op.
 */
ENTRY(mohawk_dma_unmap_area)
	mov	pc, lr
ENDPROC(mohawk_dma_unmap_area)
290
/*
 * Cache function vector consumed from C as a struct of function
 * pointers; the word order below is the struct layout — do not
 * reorder.
 */
ENTRY(mohawk_cache_fns)
	.long	mohawk_flush_kern_cache_all
	.long	mohawk_flush_user_cache_all
	.long	mohawk_flush_user_cache_range
	.long	mohawk_coherent_kern_range
	.long	mohawk_coherent_user_range
	.long	mohawk_flush_kern_dcache_area
	.long	mohawk_dma_map_area
	.long	mohawk_dma_unmap_area
	.long	mohawk_dma_flush_range
301
/*
 * cpu_mohawk_dcache_clean_area(addr, size)
 *
 * Clean (write back) a kernel D-cache area line by line.
 * - addr - r0: start address
 * - size - r1: byte count (loop runs while size remains > 0)
 */
ENTRY(cpu_mohawk_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE	@ decrement remaining size, set flags
	bhi	1b				@ unsigned >0: more lines to clean
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
309
/*
 * cpu_mohawk_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: r0 = physical address of the new page tables
 *
 * VIVT-style switch: the caches must be fully cleaned/invalidated
 * before the TTB is changed and the TLBs flushed.
 */
	.align	5
ENTRY(cpu_mohawk_switch_mm)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c14, 0		@ clean & invalidate all D cache
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	orr	r0, r0, #0x18			@ cache the page table in L2 (TTB attribute bits)
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mov	pc, lr
327
/*
 * cpu_mohawk_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out.
 * - ptep - r0: pointer to the hardware PTE slot
 * - pte  - r1: Linux PTE value
 * - ext  - r2: extension bits
 *
 * armv3_set_pte_ext (proc-macros.S) writes the entry and leaves
 * r0 pointing at the word written; we then clean that D line and
 * drain the write buffer so the walk sees the new entry.
 */
	.align	5
ENTRY(cpu_mohawk_set_pte_ext)
	armv3_set_pte_ext
	mov	r0, r0				@ pipeline spacer after the macro's stores
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
340
341 __CPUINIT
342
	.type	__mohawk_setup, #function
/*
 * __mohawk_setup
 *
 * Boot-time CPU setup, called from head.S with the MMU off.
 * In:  r4 = physical page table pointer (head.S convention)
 * Out: r0 = value to be written to the control register by caller
 * Clobbers r0, r5, r6.  Invalidates caches/TLBs, programs TTB,
 * locks out coprocessor access, then composes the ctrl value
 * from mohawk_crval.
 */
__mohawk_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs
	orr	r4, r4, #0x18			@ cache the page table in L2
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer

	mov	r0, #0				@ don't allow CP access
	mcr	p15, 0, r0, c15, c1, 0		@ write CP access register

	adr	r5, mohawk_crval
	ldmia	r5, {r5, r6}			@ r5 = clear mask, r6 = set mask
	mrc	p15, 0, r0, c1, c0		@ get control register
	bic	r0, r0, r5			@ clear unwanted bits
	orr	r0, r0, r6			@ set required bits
	mov	pc, lr				@ caller writes r0 to the ctrl register

	.size	__mohawk_setup, . - __mohawk_setup
363
/*
 * Control-register clear/set masks consumed by __mohawk_setup
 * via the crval macro (proc-macros.S):
 *   R
 *  .RVI ZFRS BLDP WCAM
 *  .011 1001 ..00 0101
 * clear  = bits always cleared, mmuset = bits set when MMU is on,
 * ucset  = bits set for the uclinux (no-MMU) configuration.
 */
	.type	mohawk_crval, #object
mohawk_crval:
	crval	clear=0x00007f3f, mmuset=0x00003905, ucset=0x00001134
373
374 __INITDATA
375
/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these.
 * Word order mirrors struct processor on the C side — do not reorder.
 */
	.type	mohawk_processor_functions, #object
mohawk_processor_functions:
	.word	v5t_early_abort			@ _data_abort
	.word	legacy_pabort			@ _prefetch_abort
	.word	cpu_mohawk_proc_init
	.word	cpu_mohawk_proc_fin
	.word	cpu_mohawk_reset
	.word	cpu_mohawk_do_idle
	.word	cpu_mohawk_dcache_clean_area
	.word	cpu_mohawk_switch_mm
	.word	cpu_mohawk_set_pte_ext
	.word	0				@ NOTE(review): presumably the suspend_size /
	.word	0				@ do_suspend / do_resume slots, unimplemented
	.word	0				@ for this core — confirm against struct processor
	.size	mohawk_processor_functions, . - mohawk_processor_functions
395
	.section ".rodata"

	/* Architecture name reported via the proc_info record below. */
	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv5te"
	.size	cpu_arch_name, . - cpu_arch_name

	/* ELF platform string for HWCAP/ELF platform matching. */
	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v5"
	.size	cpu_elf_name, . - cpu_elf_name

	/* Human-readable CPU name shown in /proc/cpuinfo. */
	.type	cpu_mohawk_name, #object
cpu_mohawk_name:
	.asciz	"Marvell 88SV331x"
	.size	cpu_mohawk_name, . - cpu_mohawk_name
412
413 .align
414
	.section ".proc.info.init", #alloc, #execinstr

/*
 * struct proc_info_list record matched by head.S against the CPU ID.
 * Field order is the struct layout — do not reorder.
 */
	.type	__88sv331x_proc_info,#object
__88sv331x_proc_info:
	.long	0x56158000			@ CPU ID value: Marvell 88SV331x (MOHAWK)
	.long	0xfffff000			@ CPU ID mask
	.long   PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ		@ __cpu_mm_mmu_flags (cacheable sections)
	.long   PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ		@ __cpu_io_mmu_flags (uncached I/O sections)
	b	__mohawk_setup			@ early setup entry, run with MMU off
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_mohawk_name
	.long	mohawk_processor_functions
	.long	v4wbi_tlb_fns			@ TLB maintenance ops
	.long	v4wb_user_fns			@ user page copy/clear ops
	.long	mohawk_cache_fns		@ cache maintenance ops (above)
	.size	__88sv331x_proc_info, . - __88sv331x_proc_info