/*
 * linux/arch/arm/boot/compressed/head.S
 *
 * Copyright (C) 1996-2002 Russell King
 * Copyright (C) 2004 Hyok S. Choi (MPU support)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>

/*
 * Debugging stuff
 *
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */
#ifdef DEBUG

#if defined(CONFIG_DEBUG_ICEDCC)

#ifdef CONFIG_CPU_V6
                .macro  loadsp, rb
                .endm
                .macro  writeb, ch, rb
                mcr     p14, 0, \ch, c0, c5, 0
                .endm
#elif defined(CONFIG_CPU_XSCALE)
                .macro  loadsp, rb
                .endm
                .macro  writeb, ch, rb
                mcr     p14, 0, \ch, c8, c0, 0
                .endm
#else
                .macro  loadsp, rb
                .endm
                .macro  writeb, ch, rb
                mcr     p14, 0, \ch, c1, c0, 0
                .endm
#endif

#else

#include <mach/debug-macro.S>

                .macro  writeb, ch, rb
                senduart \ch, \rb
                .endm

#if defined(CONFIG_ARCH_SA1100)
                .macro  loadsp, rb
                mov     \rb, #0x80000000        @ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
                add     \rb, \rb, #0x00050000   @ Ser3
#else
                add     \rb, \rb, #0x00010000   @ Ser1
#endif
                .endm
#elif defined(CONFIG_ARCH_S3C2410)
                .macro  loadsp, rb
                mov     \rb, #0x50000000
                add     \rb, \rb, #0x4000 * CONFIG_S3C_LOWLEVEL_UART_PORT
                .endm
#else
                .macro  loadsp, rb
                addruart \rb
                .endm
#endif
#endif
#endif

                .macro  kputc,val
                mov     r0, \val
                bl      putc
                .endm

                .macro  kphex,val,len
                mov     r0, \val
                mov     r1, #\len
                bl      phex
                .endm
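
@ Illustrative note (not in the original source): kputc/kphex keep the
@ debug call sites compact.  For example, "kphex r6, 8" expands to
@ "mov r0, r6; mov r1, #8; bl phex" and prints r6 as eight hex digits
@ through the UART (or DCC channel) selected by the loadsp/writeb
@ macros above.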

                .macro  debug_reloc_start
#ifdef DEBUG
                kputc   #'\n'
                kphex   r6, 8           /* processor id */
                kputc   #':'
                kphex   r7, 8           /* architecture id */
#ifdef CONFIG_CPU_CP15
                kputc   #':'
                mrc     p15, 0, r0, c1, c0
                kphex   r0, 8           /* control reg */
#endif
                kputc   #'\n'
                kphex   r5, 8           /* decompressed kernel start */
                kputc   #'-'
                kphex   r9, 8           /* decompressed kernel end */
                kputc   #'>'
                kphex   r4, 8           /* kernel execution address */
                kputc   #'\n'
#endif
                .endm

                .macro  debug_reloc_end
#ifdef DEBUG
                kphex   r5, 8           /* end of kernel */
                kputc   #'\n'
                mov     r0, r4
                bl      memdump         /* dump 256 bytes at start of kernel */
#endif
                .endm

                .section ".start", #alloc, #execinstr
/*
 * sort out different calling conventions
 */
                .align
start:
                .type   start,#function
                .rept   8
                mov     r0, r0
                .endr

                b       1f
                .word   0x016f2818              @ Magic numbers to help the loader
                .word   start                   @ absolute load/run zImage address
                .word   _edata                  @ zImage end address
1:              mov     r7, r1                  @ save architecture ID
                mov     r8, r2                  @ save atags pointer

#ifndef __ARM_ARCH_2__
                /*
                 * Booting from Angel - need to enter SVC mode and disable
                 * FIQs/IRQs (numeric definitions from angel arm.h source).
                 * We only do this if we were in user mode on entry.
                 */
                mrs     r2, cpsr                @ get current mode
                tst     r2, #3                  @ not user?
                bne     not_angel
                mov     r0, #0x17               @ angel_SWIreason_EnterSVC
                swi     0x123456                @ angel_SWI_ARM
not_angel:
                mrs     r2, cpsr                @ turn off interrupts to
                orr     r2, r2, #0xc0           @ prevent angel from running
                msr     cpsr_c, r2
#else
                teqp    pc, #0x0c000003         @ turn off interrupts
#endif
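                /*
                 * Illustrative note (not in the original source): the low
                 * CPSR mode bits are 00 only for User mode (USR = 0x10;
                 * SVC = 0x13, ABT = 0x17, ...), so "tst r2, #3" is a cheap
                 * privileged-mode test, and #0xc0 sets the I and F bits to
                 * mask IRQs and FIQs.
                 */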

                /*
                 * Note that some cache flushing and other stuff may
                 * be needed here - is there an Angel SWI call for this?
                 */

                /*
                 * some architecture specific code can be inserted
                 * by the linker here, but it should preserve r7, r8, and r9.
                 */

                .text
                adr     r0, LC0
                ldmia   r0, {r1, r2, r3, r4, r5, r6, ip, sp}
                subs    r0, r0, r1              @ calculate the delta offset

                                                @ if delta is zero, we are
                beq     not_relocated           @ running at the address we
                                                @ were linked at.

                /*
                 * We're running at a different address.  We need to fix
                 * up various pointers:
                 *   r5 - zImage base address
                 *   r6 - GOT start
                 *   ip - GOT end
                 */
                add     r5, r5, r0
                add     r6, r6, r0
                add     ip, ip, r0

#ifndef CONFIG_ZBOOT_ROM
                /*
                 * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
                 * we need to fix up pointers into the BSS region.
                 *   r2 - BSS start
                 *   r3 - BSS end
                 *   sp - stack pointer
                 */
                add     r2, r2, r0
                add     r3, r3, r0
                add     sp, sp, r0

                /*
                 * Relocate all entries in the GOT table.
                 */
1:              ldr     r1, [r6, #0]            @ relocate entries in the GOT
                add     r1, r1, r0              @ table.  This fixes up the
                str     r1, [r6], #4            @ C references.
                cmp     r6, ip
                blo     1b
#else

                /*
                 * Relocate entries in the GOT table.  We only relocate
                 * the entries that are outside the (relocated) BSS region.
                 */
1:              ldr     r1, [r6, #0]            @ relocate entries in the GOT
                cmp     r1, r2                  @ entry < bss_start ||
                cmphs   r3, r1                  @ _end < entry
                addlo   r1, r1, r0              @ table.  This fixes up the
                str     r1, [r6], #4            @ C references.
                cmp     r6, ip
                blo     1b
#endif
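                /*
                 * Worked example (illustrative, not in the original source):
                 * if the zImage was linked at 0x00000000 but loaded at
                 * 0x00008000, the delta in r0 is 0x8000, and a GOT entry
                 * that read 0x00001234 at link time is rewritten to
                 * 0x00009234, so position-dependent C references land in
                 * the right place.
                 */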

not_relocated:  mov     r0, #0
1:              str     r0, [r2], #4            @ clear bss
                str     r0, [r2], #4
                str     r0, [r2], #4
                str     r0, [r2], #4
                cmp     r2, r3
                blo     1b
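                /*
                 * Illustrative note (not in the original source): the loop
                 * clears four words (16 bytes) per iteration before testing
                 * r2 against r3, so it may write up to 12 bytes past _end
                 * when the BSS size is not a multiple of 16.
                 */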

                /*
                 * The C runtime environment should now be set up
                 * sufficiently.  Turn the cache on, set up some
                 * pointers, and start decompressing.
                 */
                bl      cache_on

                mov     r1, sp                  @ malloc space above stack
                add     r2, sp, #0x10000        @ 64k max

/*
 * Check to see if we will overwrite ourselves.
 *   r4 = final kernel address
 *   r5 = start of this image
 *   r2 = end of malloc space (and therefore this image)
 * We basically want:
 *   r4 >= r2 -> OK
 *   r4 + image length <= r5 -> OK
 */
                cmp     r4, r2
                bhs     wont_overwrite
                sub     r3, sp, r5              @ > compressed kernel size
                add     r0, r4, r3, lsl #2      @ allow for 4x expansion
                cmp     r0, r5
                bls     wont_overwrite
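                /*
                 * Worked example (illustrative, not in the original source):
                 * with r4 = 0x00008000, r5 = 0x00200000 and sp = 0x00280000,
                 * r3 = 0x80000 and r0 = 0x8000 + 4*0x80000 = 0x208000, which
                 * is above r5, so the decompressed image could collide with
                 * this one and the relocating path below is taken.
                 */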

                mov     r5, r2                  @ decompress after malloc space
                mov     r0, r5
                mov     r3, r7
                bl      decompress_kernel

                add     r0, r0, #127 + 128      @ alignment + stack
                bic     r0, r0, #127            @ align the kernel length
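                /*
                 * Illustrative note (not in the original source): the
                 * add/bic pair rounds the decompressed length up to a
                 * 128-byte boundary and reserves one extra 128-byte slot
                 * used as a temporary stack; reloc_start below subtracts
                 * those 128 bytes again so the stack itself is never copied.
                 */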
/*
 * r0     = decompressed kernel length
 * r1-r3  = unused
 * r4     = kernel execution address
 * r5     = decompressed kernel start
 * r6     = processor ID
 * r7     = architecture ID
 * r8     = atags pointer
 * r9-r14 = corrupted
 */
                add     r1, r5, r0              @ end of decompressed kernel
                adr     r2, reloc_start
                ldr     r3, LC1
                add     r3, r2, r3
1:              ldmia   r2!, {r9 - r14}         @ copy relocation code
                stmia   r1!, {r9 - r14}
                ldmia   r2!, {r9 - r14}
                stmia   r1!, {r9 - r14}
                cmp     r2, r3
                blo     1b
                add     sp, r1, #128            @ relocate the stack

                bl      cache_clean_flush
                add     pc, r5, r0              @ call relocation code

/*
 * We're not in danger of overwriting ourselves.  Do this the simple way.
 *
 * r4 = kernel execution address
 * r7 = architecture ID
 */
wont_overwrite: mov     r0, r4
                mov     r3, r7
                bl      decompress_kernel
                b       call_kernel

                .type   LC0, #object
LC0:            .word   LC0                     @ r1
                .word   __bss_start             @ r2
                .word   _end                    @ r3
                .word   zreladdr                @ r4
                .word   _start                  @ r5
                .word   _got_start              @ r6
                .word   _got_end                @ ip
                .word   user_stack+4096         @ sp
LC1:            .word   reloc_end - reloc_start
                .size   LC0, . - LC0
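                /*
                 * Illustrative note (not in the original source): LC0 holds
                 * link-time addresses.  "adr r0, LC0" earlier yields the
                 * run-time address, so r0 - r1 after the ldmia is exactly
                 * the load offset that gets applied to every pointer above.
                 */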

#ifdef CONFIG_ARCH_RPC
                .globl  params
params:         ldr     r0, =params_phys
                mov     pc, lr
                .ltorg
                .align
#endif

/*
 * Turn on the cache.  We need to set up some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * will go pop!
 *
 * On entry,
 *  r4 = kernel execution address
 *  r6 = processor ID
 *  r7 = architecture number
 *  r8 = atags pointer
 *  r9 = run-time address of "start"  (???)
 * On exit,
 *  r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *  r4, r5, r6, r7, r8
 */
                .align  5
cache_on:       mov     r3, #8                  @ cache_on function
                b       call_cache_fn

/*
 * Initialize the highest priority protection region, PR7,
 * to cover the whole 32-bit address space, cacheable and bufferable.
 */
__armv4_mpu_cache_on:
                mov     r0, #0x3f               @ 4G, the whole
                mcr     p15, 0, r0, c6, c7, 0   @ PR7 Area Setting
                mcr     p15, 0, r0, c6, c7, 1

                mov     r0, #0x80               @ PR7
                mcr     p15, 0, r0, c2, c0, 0   @ D-cache on
                mcr     p15, 0, r0, c2, c0, 1   @ I-cache on
                mcr     p15, 0, r0, c3, c0, 0   @ write-buffer on

                mov     r0, #0xc000
                mcr     p15, 0, r0, c5, c0, 1   @ I-access permission
                mcr     p15, 0, r0, c5, c0, 0   @ D-access permission

                mov     r0, #0
                mcr     p15, 0, r0, c7, c10, 4  @ drain write buffer
                mcr     p15, 0, r0, c7, c5, 0   @ flush(inval) I-Cache
                mcr     p15, 0, r0, c7, c6, 0   @ flush(inval) D-Cache
                mrc     p15, 0, r0, c1, c0, 0   @ read control reg
                                                @ ...I .... ..D. WC.M
                orr     r0, r0, #0x002d         @ .... .... ..1. 11.1
                orr     r0, r0, #0x1000         @ ...1 .... .... ....

                mcr     p15, 0, r0, c1, c0, 0   @ write control reg

                mov     r0, #0
                mcr     p15, 0, r0, c7, c5, 0   @ flush(inval) I-Cache
                mcr     p15, 0, r0, c7, c6, 0   @ flush(inval) D-Cache
                mov     pc, lr

__armv3_mpu_cache_on:
                mov     r0, #0x3f               @ 4G, the whole
                mcr     p15, 0, r0, c6, c7, 0   @ PR7 Area Setting

                mov     r0, #0x80               @ PR7
                mcr     p15, 0, r0, c2, c0, 0   @ cache on
                mcr     p15, 0, r0, c3, c0, 0   @ write-buffer on

                mov     r0, #0xc000
                mcr     p15, 0, r0, c5, c0, 0   @ access permission

                mov     r0, #0
                mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
                mrc     p15, 0, r0, c1, c0, 0   @ read control reg
                                                @ .... .... .... WC.M
                orr     r0, r0, #0x000d         @ .... .... .... 11.1
                mov     r0, #0
                mcr     p15, 0, r0, c1, c0, 0   @ write control reg

                mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
                mov     pc, lr

__setup_mmu:    sub     r3, r4, #16384          @ Page directory size
                bic     r3, r3, #0xff           @ Align the pointer
                bic     r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
                mov     r0, r3
                mov     r9, r0, lsr #18
                mov     r9, r9, lsl #18         @ start of RAM
                add     r10, r9, #0x10000000    @ a reasonable RAM size
                mov     r1, #0x12
                orr     r1, r1, #3 << 10
                add     r2, r3, #16384
1:              cmp     r1, r9                  @ if virt > start of RAM
                orrhs   r1, r1, #0x0c           @ set cacheable, bufferable
                cmp     r1, r10                 @ if virt > end of RAM
                bichs   r1, r1, #0x0c           @ clear cacheable, bufferable
                str     r1, [r0], #4            @ 1:1 mapping
                add     r1, r1, #1048576
                teq     r0, r2
                bne     1b
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
                mov     r1, #0x1e
                orr     r1, r1, #3 << 10
                mov     r2, pc, lsr #20
                orr     r1, r1, r2, lsl #20
                add     r0, r3, r2, lsl #2
                str     r1, [r0], #4
                add     r1, r1, #1048576
                str     r1, [r0]
                mov     pc, lr
ENDPROC(__setup_mmu)
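                /*
                 * Worked example (illustrative, not in the original source):
                 * each entry is a 1MB section descriptor.  0x12 sets the
                 * section type bits, 3 << 10 sets AP to full access, and
                 * 0x0c is the C+B bits, so megabyte N of RAM maps to
                 * (N << 20) | 0xc1e while non-RAM megabytes get
                 * (N << 20) | 0xc12.
                 */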

__armv4_mmu_cache_on:
                mov     r12, lr
                bl      __setup_mmu
                mov     r0, #0
                mcr     p15, 0, r0, c7, c10, 4  @ drain write buffer
                mcr     p15, 0, r0, c8, c7, 0   @ flush I,D TLBs
                mrc     p15, 0, r0, c1, c0, 0   @ read control reg
                orr     r0, r0, #0x5000         @ I-cache enable, RR cache replacement
                orr     r0, r0, #0x0030
#ifdef CONFIG_CPU_ENDIAN_BE8
                orr     r0, r0, #1 << 25        @ big-endian page tables
#endif
                bl      __common_mmu_cache_on
                mov     r0, #0
                mcr     p15, 0, r0, c8, c7, 0   @ flush I,D TLBs
                mov     pc, r12

__armv7_mmu_cache_on:
                mov     r12, lr
                mrc     p15, 0, r11, c0, c1, 4  @ read ID_MMFR0
                tst     r11, #0xf               @ VMSA
                blne    __setup_mmu
                mov     r0, #0
                mcr     p15, 0, r0, c7, c10, 4  @ drain write buffer
                tst     r11, #0xf               @ VMSA
                mcrne   p15, 0, r0, c8, c7, 0   @ flush I,D TLBs
                mrc     p15, 0, r0, c1, c0, 0   @ read control reg
                orr     r0, r0, #0x5000         @ I-cache enable, RR cache replacement
                orr     r0, r0, #0x003c         @ write buffer
#ifdef CONFIG_CPU_ENDIAN_BE8
                orr     r0, r0, #1 << 25        @ big-endian page tables
#endif
                orrne   r0, r0, #1              @ MMU enabled
                movne   r1, #-1
                mcrne   p15, 0, r3, c2, c0, 0   @ load page table pointer
                mcrne   p15, 0, r1, c3, c0, 0   @ load domain access control
                mcr     p15, 0, r0, c1, c0, 0   @ load control register
                mrc     p15, 0, r0, c1, c0, 0   @ and read it back
                mov     r0, #0
                mcr     p15, 0, r0, c7, c5, 4   @ ISB
                mov     pc, r12

__fa526_cache_on:
                mov     r12, lr
                bl      __setup_mmu
                mov     r0, #0
                mcr     p15, 0, r0, c7, c7, 0   @ Invalidate whole cache
                mcr     p15, 0, r0, c7, c10, 4  @ drain write buffer
                mcr     p15, 0, r0, c8, c7, 0   @ flush UTLB
                mrc     p15, 0, r0, c1, c0, 0   @ read control reg
                orr     r0, r0, #0x1000         @ I-cache enable
                bl      __common_mmu_cache_on
                mov     r0, #0
                mcr     p15, 0, r0, c8, c7, 0   @ flush UTLB
                mov     pc, r12

__arm6_mmu_cache_on:
                mov     r12, lr
                bl      __setup_mmu
                mov     r0, #0
                mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
                mcr     p15, 0, r0, c5, c0, 0   @ invalidate whole TLB v3
                mov     r0, #0x30
                bl      __common_mmu_cache_on
                mov     r0, #0
                mcr     p15, 0, r0, c5, c0, 0   @ invalidate whole TLB v3
                mov     pc, r12

__common_mmu_cache_on:
#ifndef DEBUG
                orr     r0, r0, #0x000d         @ Write buffer, mmu
#endif
                mov     r1, #-1
                mcr     p15, 0, r3, c2, c0, 0   @ load page table pointer
                mcr     p15, 0, r1, c3, c0, 0   @ load domain access control
                b       1f
                .align  5                       @ cache line aligned
1:              mcr     p15, 0, r0, c1, c0, 0   @ load control register
                mrc     p15, 0, r0, c1, c0, 0   @ and read it back to
                sub     pc, lr, r0, lsr #32     @ properly flush pipeline
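                /*
                 * Illustrative note (not in the original source):
                 * "r0, lsr #32" always yields 0 in ARM shifter encoding, so
                 * "sub pc, lr, r0, lsr #32" is really "mov pc, lr" with a
                 * data dependency on the just-read control register, which
                 * forces the pipeline to drain before execution continues
                 * with the MMU on.
                 */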

/*
 * All code following this line is relocatable.  It is relocated by
 * the above code to the end of the decompressed kernel image and
 * executed there.  During this time, we have no stacks.
 *
 * r0     = decompressed kernel length
 * r1-r3  = unused
 * r4     = kernel execution address
 * r5     = decompressed kernel start
 * r6     = processor ID
 * r7     = architecture ID
 * r8     = atags pointer
 * r9-r14 = corrupted
 */
                .align  5
reloc_start:    add     r9, r5, r0
                sub     r9, r9, #128            @ do not copy the stack
                debug_reloc_start
                mov     r1, r4
1:
                .rept   4
                ldmia   r5!, {r0, r2, r3, r10 - r14}    @ relocate kernel
                stmia   r1!, {r0, r2, r3, r10 - r14}
                .endr

                cmp     r5, r9
                blo     1b
                add     sp, r1, #128            @ relocate the stack
                debug_reloc_end

call_kernel:    bl      cache_clean_flush
                bl      cache_off
                mov     r0, #0                  @ must be zero
                mov     r1, r7                  @ restore architecture number
                mov     r2, r8                  @ restore atags pointer
                mov     pc, r4                  @ call kernel
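                /*
                 * Illustrative note (not in the original source): this is
                 * the standard ARM Linux boot interface: r0 = 0, r1 = the
                 * machine architecture number, r2 = the physical address of
                 * the ATAGs list, as expected by the decompressed kernel's
                 * entry point.
                 */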

/*
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block.  Please note this is all position
 * independent code.
 *
 * r1  = corrupted
 * r2  = corrupted
 * r3  = block offset
 * r6  = corrupted
 * r12 = corrupted
 */

call_cache_fn:  adr     r12, proc_types
#ifdef CONFIG_CPU_CP15
                mrc     p15, 0, r6, c0, c0      @ get processor ID
#else
                ldr     r6, =CONFIG_PROCESSOR_ID
#endif
1:              ldr     r1, [r12, #0]           @ get value
                ldr     r2, [r12, #4]           @ get mask
                eor     r1, r1, r6              @ (real ^ match)
                tst     r1, r2                  @       & mask
                addeq   pc, r12, r3             @ call cache function
                add     r12, r12, #4*5
                b       1b
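                /*
                 * Worked example (illustrative, not in the original source):
                 * suppose an SA1110 reads ID 0x6901b119.  Against the
                 * sa1110 entry (match 0x6901b110, mask 0xfffffff0):
                 * (0x6901b119 ^ 0x6901b110) & 0xfffffff0 = 0x9 & 0xfffffff0
                 * = 0, so "addeq pc, r12, r3" dispatches into that entry at
                 * the on/off/flush offset selected by r3.
                 */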

/*
 * Table for cache operations.  This is basically:
 *  - CPU ID match
 *  - CPU ID mask
 *  - 'cache on' method instruction
 *  - 'cache off' method instruction
 *  - 'cache flush' method instruction
 *
 * We match an entry using: ((real_id ^ match) & mask) == 0
 *
 * Writethrough caches generally only need 'on' and 'off'
 * methods.  Writeback caches _must_ have the flush method
 * defined.
 */
                .type   proc_types,#object
proc_types:
                .word   0x41560600              @ ARM6/610
                .word   0xffffffe0
                b       __arm6_mmu_cache_off    @ works, but slow
                b       __arm6_mmu_cache_off
                mov     pc, lr
@               b       __arm6_mmu_cache_on     @ untested
@               b       __arm6_mmu_cache_off
@               b       __armv3_mmu_cache_flush

                .word   0x00000000              @ old ARM ID
                .word   0x0000f000
                mov     pc, lr
                mov     pc, lr
                mov     pc, lr

                .word   0x41007000              @ ARM7/710
                .word   0xfff8fe00
                b       __arm7_mmu_cache_off
                b       __arm7_mmu_cache_off
                mov     pc, lr

                .word   0x41807200              @ ARM720T (writethrough)
                .word   0xffffff00
                b       __armv4_mmu_cache_on
                b       __armv4_mmu_cache_off
                mov     pc, lr

                .word   0x41007400              @ ARM74x
                .word   0xff00ff00
                b       __armv3_mpu_cache_on
                b       __armv3_mpu_cache_off
                b       __armv3_mpu_cache_flush

                .word   0x41009400              @ ARM94x
                .word   0xff00ff00
                b       __armv4_mpu_cache_on
                b       __armv4_mpu_cache_off
                b       __armv4_mpu_cache_flush

                .word   0x00007000              @ ARM7 IDs
                .word   0x0000f000
                mov     pc, lr
                mov     pc, lr
                mov     pc, lr

                @ Everything from here on will be the new ID system.

                .word   0x4401a100              @ sa110 / sa1100
                .word   0xffffffe0
                b       __armv4_mmu_cache_on
                b       __armv4_mmu_cache_off
                b       __armv4_mmu_cache_flush

                .word   0x6901b110              @ sa1110
                .word   0xfffffff0
                b       __armv4_mmu_cache_on
                b       __armv4_mmu_cache_off
                b       __armv4_mmu_cache_flush

                .word   0x56056930
                .word   0xff0ffff0              @ PXA935
                b       __armv4_mmu_cache_on
                b       __armv4_mmu_cache_off
                b       __armv4_mmu_cache_flush

                .word   0x56158000              @ PXA168
                .word   0xfffff000
                b       __armv4_mmu_cache_on
                b       __armv4_mmu_cache_off
                b       __armv5tej_mmu_cache_flush

                .word   0x56050000              @ Feroceon
                .word   0xff0f0000
                b       __armv4_mmu_cache_on
                b       __armv4_mmu_cache_off
                b       __armv5tej_mmu_cache_flush

                .word   0x66015261              @ FA526
                .word   0xff01fff1
                b       __fa526_cache_on
                b       __armv4_mmu_cache_off
                b       __fa526_cache_flush

                @ These match on the architecture ID

                .word   0x00020000              @ ARMv4T
                .word   0x000f0000
                b       __armv4_mmu_cache_on
                b       __armv4_mmu_cache_off
                b       __armv4_mmu_cache_flush

                .word   0x00050000              @ ARMv5TE
                .word   0x000f0000
                b       __armv4_mmu_cache_on
                b       __armv4_mmu_cache_off
                b       __armv4_mmu_cache_flush

                .word   0x00060000              @ ARMv5TEJ
                .word   0x000f0000
                b       __armv4_mmu_cache_on
                b       __armv4_mmu_cache_off
                b       __armv5tej_mmu_cache_flush

                .word   0x0007b000              @ ARMv6
                .word   0x000ff000
                b       __armv4_mmu_cache_on
                b       __armv4_mmu_cache_off
                b       __armv6_mmu_cache_flush

                .word   0x000f0000              @ new CPU Id
                .word   0x000f0000
                b       __armv7_mmu_cache_on
                b       __armv7_mmu_cache_off
                b       __armv7_mmu_cache_flush

                .word   0                       @ unrecognised type
                .word   0
                mov     pc, lr
                mov     pc, lr
                mov     pc, lr

                .size   proc_types, . - proc_types
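                /*
                 * Illustrative note (not in the original source): each entry
                 * is exactly five words (two ID words plus three branch
                 * instructions), which is why call_cache_fn steps r12 by
                 * #4*5 and why cache_on, cache_off and cache_clean_flush
                 * pass r3 = 8, 12 and 16 respectively: the byte offset of
                 * the wanted method past the two ID words.
                 */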

/*
 * Turn off the Cache and MMU.  ARMv3 does not support
 * reading the control register, but ARMv4 does.
 *
 * On entry,  r6 = processor ID
 * On exit,   r0, r1, r2, r3, r12 corrupted
 * This routine must preserve: r4, r6, r7
 */
                .align  5
cache_off:      mov     r3, #12                 @ cache_off function
                b       call_cache_fn

__armv4_mpu_cache_off:
                mrc     p15, 0, r0, c1, c0
                bic     r0, r0, #0x000d
                mcr     p15, 0, r0, c1, c0      @ turn MPU and cache off
                mov     r0, #0
                mcr     p15, 0, r0, c7, c10, 4  @ drain write buffer
                mcr     p15, 0, r0, c7, c6, 0   @ flush D-Cache
                mcr     p15, 0, r0, c7, c5, 0   @ flush I-Cache
                mov     pc, lr

__armv3_mpu_cache_off:
                mrc     p15, 0, r0, c1, c0
                bic     r0, r0, #0x000d
                mcr     p15, 0, r0, c1, c0, 0   @ turn MPU and cache off
                mov     r0, #0
                mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
                mov     pc, lr

__armv4_mmu_cache_off:
                mrc     p15, 0, r0, c1, c0
                bic     r0, r0, #0x000d
                mcr     p15, 0, r0, c1, c0      @ turn MMU and cache off
                mov     r0, #0
                mcr     p15, 0, r0, c7, c7      @ invalidate whole cache v4
                mcr     p15, 0, r0, c8, c7      @ invalidate whole TLB v4
                mov     pc, lr

__armv7_mmu_cache_off:
                mrc     p15, 0, r0, c1, c0
                bic     r0, r0, #0x000d
                mcr     p15, 0, r0, c1, c0      @ turn MMU and cache off
                mov     r12, lr
                bl      __armv7_mmu_cache_flush
                mov     r0, #0
                mcr     p15, 0, r0, c8, c7, 0   @ invalidate whole TLB
                mcr     p15, 0, r0, c7, c5, 6   @ invalidate BTC
                mcr     p15, 0, r0, c7, c10, 4  @ DSB
                mcr     p15, 0, r0, c7, c5, 4   @ ISB
                mov     pc, r12

__arm6_mmu_cache_off:
                mov     r0, #0x00000030         @ ARM6 control reg.
                b       __armv3_mmu_cache_off

__arm7_mmu_cache_off:
                mov     r0, #0x00000070         @ ARM7 control reg.
                b       __armv3_mmu_cache_off

__armv3_mmu_cache_off:
                mcr     p15, 0, r0, c1, c0, 0   @ turn MMU and cache off
                mov     r0, #0
                mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
                mcr     p15, 0, r0, c5, c0, 0   @ invalidate whole TLB v3
                mov     pc, lr
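                /*
                 * Illustrative note (not in the original source): 0x000d
                 * clears bits 0, 2 and 3 of the control register, i.e. M
                 * (MMU/MPU), C (D-cache) and W (write buffer), the common
                 * "everything off" mask used by all the variants above.
                 */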

/*
 * Clean and flush the cache to maintain consistency.
 *
 * On entry,
 *  r6 = processor ID
 * On exit,
 *  r1, r2, r3, r11, r12 corrupted
 * This routine must preserve:
 *  r0, r4, r5, r6, r7
 */
                .align  5
cache_clean_flush:
                mov     r3, #16
                b       call_cache_fn

__armv4_mpu_cache_flush:
                mov     r2, #1
                mov     r3, #0
                mcr     p15, 0, ip, c7, c6, 0   @ invalidate D cache
                mov     r1, #7 << 5             @ 8 segments
1:              orr     r3, r1, #63 << 26       @ 64 entries
2:              mcr     p15, 0, r3, c7, c14, 2  @ clean & invalidate D index
                subs    r3, r3, #1 << 26
                bcs     2b                      @ entries 63 to 0
                subs    r1, r1, #1 << 5
                bcs     1b                      @ segments 7 to 0

                teq     r2, #0
                mcrne   p15, 0, ip, c7, c5, 0   @ invalidate I cache
                mcr     p15, 0, ip, c7, c10, 4  @ drain WB
                mov     pc, lr

__fa526_cache_flush:
                mov     r1, #0
                mcr     p15, 0, r1, c7, c14, 0  @ clean and invalidate D cache
                mcr     p15, 0, r1, c7, c5, 0   @ flush I cache
                mcr     p15, 0, r1, c7, c10, 4  @ drain WB
                mov     pc, lr

__armv6_mmu_cache_flush:
                mov     r1, #0
                mcr     p15, 0, r1, c7, c14, 0  @ clean+invalidate D
                mcr     p15, 0, r1, c7, c5, 0   @ invalidate I+BTB
                mcr     p15, 0, r1, c7, c15, 0  @ clean+invalidate unified
                mcr     p15, 0, r1, c7, c10, 4  @ drain WB
                mov     pc, lr

__armv7_mmu_cache_flush:
                mrc     p15, 0, r10, c0, c1, 5  @ read ID_MMFR1
                tst     r10, #0xf << 16         @ hierarchical cache (ARMv7)
                mov     r10, #0
                beq     hierarchical
                mcr     p15, 0, r10, c7, c14, 0 @ clean+invalidate D
                b       iflush
hierarchical:
                mcr     p15, 0, r10, c7, c10, 5 @ DMB
                stmfd   sp!, {r0-r5, r7, r9, r11}
                mrc     p15, 1, r0, c0, c0, 1   @ read clidr
                ands    r3, r0, #0x7000000      @ extract loc from clidr
                mov     r3, r3, lsr #23         @ left align loc bit field
                beq     finished                @ if loc is 0, then no need to clean
                mov     r10, #0                 @ start clean at cache level 0
loop1:
                add     r2, r10, r10, lsr #1    @ work out 3x current cache level
                mov     r1, r0, lsr r2          @ extract cache type bits from clidr
                and     r1, r1, #7              @ mask of the bits for current cache only
                cmp     r1, #2                  @ see what cache we have at this level
                blt     skip                    @ skip if no cache, or just i-cache
                mcr     p15, 2, r10, c0, c0, 0  @ select current cache level in cssr
                mcr     p15, 0, r10, c7, c5, 4  @ isb to sync the new cssr&csidr
                mrc     p15, 1, r1, c0, c0, 0   @ read the new csidr
                and     r2, r1, #7              @ extract the length of the cache lines
                add     r2, r2, #4              @ add 4 (line length offset)
                ldr     r4, =0x3ff
                ands    r4, r4, r1, lsr #3      @ find maximum number on the way size
                clz     r5, r4                  @ find bit position of way size increment
                ldr     r7, =0x7fff
                ands    r7, r7, r1, lsr #13     @ extract max number of the index size
loop2:
                mov     r9, r4                  @ create working copy of max way size
loop3:
                orr     r11, r10, r9, lsl r5    @ factor way and cache number into r11
                orr     r11, r11, r7, lsl r2    @ factor index number into r11
                mcr     p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
                subs    r9, r9, #1              @ decrement the way
                bge     loop3
                subs    r7, r7, #1              @ decrement the index
                bge     loop2
skip:
                add     r10, r10, #2            @ increment cache number
                cmp     r3, r10
                bgt     loop1
finished:
                ldmfd   sp!, {r0-r5, r7, r9, r11}
                mov     r10, #0                 @ switch back to cache level 0
                mcr     p15, 2, r10, c0, c0, 0  @ select current cache level in cssr
iflush:
                mcr     p15, 0, r10, c7, c10, 4 @ DSB
                mcr     p15, 0, r10, c7, c5, 0  @ invalidate I+BTB
                mcr     p15, 0, r10, c7, c10, 4 @ DSB
                mcr     p15, 0, r10, c7, c5, 4  @ ISB
                mov     pc, lr
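                /*
                 * Illustrative note (not in the original source): this is
                 * the generic ARMv7 clean-by-set/way walk: for every level
                 * that CLIDR reports as a data or unified cache, from level
                 * 0 up to the level of coherency, it builds DCCISW operands
                 * from (level | way << r5 | set << r2) and iterates ways
                 * then sets.
                 */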

__armv5tej_mmu_cache_flush:
1:              mrc     p15, 0, r15, c7, c14, 3 @ test,clean,invalidate D cache
                bne     1b
                mcr     p15, 0, r0, c7, c5, 0   @ flush I cache
                mcr     p15, 0, r0, c7, c10, 4  @ drain WB
                mov     pc, lr
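                /*
                 * Illustrative note (not in the original source): with r15
                 * as the destination, the "test, clean and invalidate" MRC
                 * above sets the condition flags instead of writing a
                 * register; the loop spins until the D cache reports clean.
                 */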

__armv4_mmu_cache_flush:
                mov     r2, #64*1024            @ default: 32K dcache size (*2)
                mov     r11, #32                @ default: 32 byte line size
                mrc     p15, 0, r3, c0, c0, 1   @ read cache type
                teq     r3, r6                  @ cache ID register present?
                beq     no_cache_id
                mov     r1, r3, lsr #18
                and     r1, r1, #7
                mov     r2, #1024
                mov     r2, r2, lsl r1          @ base dcache size *2
                tst     r3, #1 << 14            @ test M bit
                addne   r2, r2, r2, lsr #1      @ +1/2 size if M == 1
                mov     r3, r3, lsr #12
                and     r3, r3, #3
                mov     r11, #8
                mov     r11, r11, lsl r3        @ cache line size in bytes
no_cache_id:
                bic     r1, pc, #63             @ align to longest cache line
                add     r2, r1, r2
1:              ldr     r3, [r1], r11           @ s/w flush D cache
                teq     r1, r2
                bne     1b

                mcr     p15, 0, r1, c7, c5, 0   @ flush I cache
                mcr     p15, 0, r1, c7, c6, 0   @ flush D cache
                mcr     p15, 0, r1, c7, c10, 4  @ drain WB
                mov     pc, lr
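                /*
                 * Illustrative note (not in the original source): ARMv4 has
                 * no clean-entire-D-cache operation, so this reads a region
                 * twice the D-cache size, one line at a time; the loads
                 * evict every dirty line, after which the invalidates above
                 * are safe.
                 */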

__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
                mov     r1, #0
                mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
                mov     pc, lr

/*
 * Various debugging routines for printing hex characters and
 * memory, which again must be relocatable.
 */
#ifdef DEBUG
                .type   phexbuf,#object
phexbuf:        .space  12
                .size   phexbuf, . - phexbuf

phex:           adr     r3, phexbuf
                mov     r2, #0
                strb    r2, [r3, r1]
1:              subs    r1, r1, #1
                movmi   r0, r3
                bmi     puts
                and     r2, r0, #15
                mov     r0, r0, lsr #4
                cmp     r2, #10
                addge   r2, r2, #7
                add     r2, r2, #'0'
                strb    r2, [r3, r1]
                b       1b

puts:           loadsp  r3
1:              ldrb    r2, [r0], #1
                teq     r2, #0
                moveq   pc, lr
2:              writeb  r2, r3
                mov     r1, #0x00020000
3:              subs    r1, r1, #1
                bne     3b
                teq     r2, #'\n'
                moveq   r2, #'\r'
                beq     2b
                teq     r0, #0
                bne     1b
                mov     pc, lr
putc:
                mov     r2, r0
                mov     r0, #0
                loadsp  r3
                b       2b

memdump:        mov     r12, r0
                mov     r10, lr
                mov     r11, #0
2:              mov     r0, r11, lsl #2
                add     r0, r0, r12
                mov     r1, #8
                bl      phex
                mov     r0, #':'
                bl      putc
1:              mov     r0, #' '
                bl      putc
                ldr     r0, [r12, r11, lsl #2]
                mov     r1, #8
                bl      phex
                and     r0, r11, #7
                teq     r0, #3
                moveq   r0, #' '
                bleq    putc
                and     r0, r11, #7
                add     r11, r11, #1
                teq     r0, #7
                bne     1b
                mov     r0, #'\n'
                bl      putc
                cmp     r11, #64
                blt     2b
                mov     pc, r10
#endif
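                /*
                 * Illustrative note (not in the original source): memdump
                 * prints 64 words (the 256 bytes mentioned in
                 * debug_reloc_end) as eight words per line, each line
                 * prefixed with its address, with an extra space after the
                 * fourth word.
                 */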

                .ltorg
reloc_end:

                .align
                .section ".stack", "w"
user_stack:     .space  4096