ARM: zImage: the page table memory must be considered before relocation
/*
 * linux/arch/arm/boot/compressed/head.S
 *
 * Copyright (C) 1996-2002 Russell King
 * Copyright (C) 2004 Hyok S. Choi (MPU support)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>

/*
 * Debugging stuff
 *
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */
#ifdef DEBUG

#if defined(CONFIG_DEBUG_ICEDCC)

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c0, c5, 0
		.endm
#elif defined(CONFIG_CPU_V7)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
wait:		mrc	p14, 0, pc, c0, c1, 0
		bcs	wait
		mcr	p14, 0, \ch, c0, c5, 0
		.endm
#elif defined(CONFIG_CPU_XSCALE)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c8, c0, 0
		.endm
#else
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c1, c0, 0
		.endm
#endif

#else

#include <mach/debug-macro.S>

		.macro	writeb, ch, rb
		senduart \ch, \rb
		.endm

#if defined(CONFIG_ARCH_SA1100)
		.macro	loadsp, rb, tmp
		mov	\rb, #0x80000000	@ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
		add	\rb, \rb, #0x00050000	@ Ser3
#else
		add	\rb, \rb, #0x00010000	@ Ser1
#endif
		.endm
#elif defined(CONFIG_ARCH_S3C2410)
		.macro	loadsp, rb, tmp
		mov	\rb, #0x50000000
		add	\rb, \rb, #0x4000 * CONFIG_S3C_LOWLEVEL_UART_PORT
		.endm
#else
		.macro	loadsp, rb, tmp
		addruart \rb, \tmp
		.endm
#endif
#endif
#endif

		.macro	kputc,val
		mov	r0, \val
		bl	putc
		.endm

		.macro	kphex,val,len
		mov	r0, \val
		mov	r1, #\len
		bl	phex
		.endm

		.macro	debug_reloc_start
#ifdef DEBUG
		kputc	#'\n'
		kphex	r6, 8		/* processor id */
		kputc	#':'
		kphex	r7, 8		/* architecture id */
#ifdef CONFIG_CPU_CP15
		kputc	#':'
		mrc	p15, 0, r0, c1, c0
		kphex	r0, 8		/* control reg */
#endif
		kputc	#'\n'
		kphex	r5, 8		/* decompressed kernel start */
		kputc	#'-'
		kphex	r9, 8		/* decompressed kernel end */
		kputc	#'>'
		kphex	r4, 8		/* kernel execution address */
		kputc	#'\n'
#endif
		.endm

		.macro	debug_reloc_end
#ifdef DEBUG
		kphex	r5, 8		/* end of kernel */
		kputc	#'\n'
		mov	r0, r4
		bl	memdump		/* dump 256 bytes at start of kernel */
#endif
		.endm

		.section ".start", #alloc, #execinstr
/*
 * sort out different calling conventions
 */
		.align
		.arm				@ Always enter in ARM state
start:
		.type	start,#function
		.rept	7
		mov	r0, r0
		.endr
   ARM(		mov	r0, r0		)
   ARM(		b	1f		)
 THUMB(		adr	r12, BSYM(1f)	)
 THUMB(		bx	r12		)

		.word	0x016f2818		@ Magic numbers to help the loader
		.word	start			@ absolute load/run zImage address
		.word	_edata			@ zImage end address
 THUMB(		.thumb			)
1:		mov	r7, r1			@ save architecture ID
		mov	r8, r2			@ save atags pointer
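
/*
 * The three .word values above are the zImage header that boot loaders
 * key on.  As a rough C sketch (illustrative only: the struct and field
 * names are invented here, the values come from the words above; in the
 * ARM-state build the magic lands at byte offset 0x24):
 *
 *	struct zimage_header {
 *		uint32_t code[9];	// 7 NOPs, a mov, and a branch
 *		uint32_t magic;		// 0x016f2818
 *		uint32_t start;		// absolute load/run zImage address
 *		uint32_t end;		// _edata: zImage end address
 *	};
 */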

#ifndef __ARM_ARCH_2__
		/*
		 * Booting from Angel - need to enter SVC mode and disable
		 * FIQs/IRQs (numeric definitions from angel arm.h source).
		 * We only do this if we were in user mode on entry.
		 */
		mrs	r2, cpsr		@ get current mode
		tst	r2, #3			@ not user?
		bne	not_angel
		mov	r0, #0x17		@ angel_SWIreason_EnterSVC
 ARM(		swi	0x123456	)	@ angel_SWI_ARM
 THUMB(		svc	0xab		)	@ angel_SWI_THUMB
not_angel:
		mrs	r2, cpsr		@ turn off interrupts to
		orr	r2, r2, #0xc0		@ prevent angel from running
		msr	cpsr_c, r2
#else
		teqp	pc, #0x0c000003		@ turn off interrupts
#endif

		/*
		 * Note that some cache flushing and other stuff may
		 * be needed here - is there an Angel SWI call for this?
		 */

		/*
		 * some architecture specific code can be inserted
		 * by the linker here, but it should preserve r7, r8, and r9.
		 */

		.text

#ifdef CONFIG_AUTO_ZRELADDR
		@ determine final kernel image address
		mov	r4, pc
		and	r4, r4, #0xf8000000
		add	r4, r4, #TEXT_OFFSET
#else
		ldr	r4, =zreladdr
#endif
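
/*
 * The CONFIG_AUTO_ZRELADDR case above, as one line of C (a sketch,
 * assuming the zImage was loaded into the same 128MB region as the
 * start of RAM; the variable names are made up):
 *
 *	final_addr = (current_pc & 0xf8000000) + TEXT_OFFSET;
 */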

		bl	cache_on

restart:	adr	r0, LC0
		ldmia	r0, {r1, r2, r3, r6, r9, r11, r12}
		ldr	sp, [r0, #28]

		/*
		 * We might be running at a different address.  We need
		 * to fix up various pointers.
		 */
		sub	r0, r0, r1		@ calculate the delta offset
		add	r6, r6, r0		@ _edata

#ifndef CONFIG_ZBOOT_ROM
		/* malloc space is above the relocated stack (64k max) */
		add	sp, sp, r0
		add	r10, sp, #0x10000
#else
		/*
		 * With ZBOOT_ROM the bss/stack is non relocatable,
		 * but someone could still run this code from RAM,
		 * in which case our reference is _edata.
		 */
		mov	r10, r6
#endif

/*
 * Check to see if we will overwrite ourselves.
 *   r4  = final kernel address
 *   r9  = size of decompressed image
 *   r10 = end of this image, including bss/stack/malloc space if non XIP
 * We basically want:
 *   r4 - 16k page directory >= r10 -> OK
 *   r4 + image length <= current position (pc) -> OK
 */
		add	r10, r10, #16384
		cmp	r4, r10
		bhs	wont_overwrite
		add	r10, r4, r9
 ARM(		cmp	r10, pc		)
 THUMB(		mov	lr, pc		)
 THUMB(		cmp	r10, lr		)
		bls	wont_overwrite

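/*
 * The two tests above, as C (a sketch; names are made up):
 *
 *	// OK if the 16k page directory below the kernel clears our end
 *	if (final_addr >= image_end + 16384)		// r4 >= r10
 *		goto wont_overwrite;
 *	// OK if the decompressed kernel ends below where we are running
 *	if (final_addr + image_size <= current_pc)	// r10 <= pc
 *		goto wont_overwrite;
 */
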
/*
 * Relocate ourselves past the end of the decompressed kernel.
 *   r6  = _edata
 *   r10 = end of the decompressed kernel
 * Because we always copy ahead, we need to do it from the end and go
 * backward in case the source and destination overlap.
 */
		/*
		 * Bump to the next 256-byte boundary with the size of
		 * the relocation code added.  This avoids overwriting
		 * ourselves when the offset is small.
		 */
		add	r10, r10, #((reloc_code_end - restart + 256) & ~255)
		bic	r10, r10, #255

		/* Get start of code we want to copy and align it down. */
		adr	r5, restart
		bic	r5, r5, #31

		sub	r9, r6, r5		@ size to copy
		add	r9, r9, #31		@ rounded up to a multiple
		bic	r9, r9, #31		@ ... of 32 bytes
		add	r6, r9, r5
		add	r9, r9, r10

1:		ldmdb	r6!, {r0 - r3, r10 - r12, lr}
		cmp	r6, r5
		stmdb	r9!, {r0 - r3, r10 - r12, lr}
		bhi	1b

		/* Preserve offset to relocated code. */
		sub	r6, r9, r6

#ifndef CONFIG_ZBOOT_ROM
		/* cache_clean_flush may use the stack, so relocate it */
		add	sp, sp, r6
#endif

		bl	cache_clean_flush

		adr	r0, BSYM(restart)
		add	r0, r0, r6
		mov	pc, r0

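/*
 * The ldmdb/stmdb loop above is a descending copy, 32 bytes (one
 * 8-register block) per iteration.  A C sketch (names are made up):
 *
 *	uint32_t *src = code_end;	// r6, 32-byte aligned
 *	uint32_t *dst = dest_end;	// r9
 *	do {
 *		src -= 8;
 *		dst -= 8;
 *		memcpy(dst, src, 32);
 *	} while (src > code_start);	// r5
 *
 * Copying from the tail downward is what keeps the copy safe when the
 * destination overlaps the end of the source.
 */
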
wont_overwrite:
/*
 * If delta is zero, we are running at the address we were linked at.
 *   r0  = delta
 *   r2  = BSS start
 *   r3  = BSS end
 *   r4  = kernel execution address
 *   r7  = architecture ID
 *   r8  = atags pointer
 *   r11 = GOT start
 *   r12 = GOT end
 *   sp  = stack pointer
 */
		teq	r0, #0
		beq	not_relocated
		add	r11, r11, r0
		add	r12, r12, r0

#ifndef CONFIG_ZBOOT_ROM
		/*
		 * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
		 * we need to fix up pointers into the BSS region.
		 * Note that the stack pointer has already been fixed up.
		 */
		add	r2, r2, r0
		add	r3, r3, r0

		/*
		 * Relocate all entries in the GOT table.
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		add	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r11], #4		@ C references.
		cmp	r11, r12
		blo	1b
#else

		/*
		 * Relocate entries in the GOT table.  We only relocate
		 * the entries that are outside the (relocated) BSS region.
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		cmp	r1, r2			@ entry < bss_start ||
		cmphs	r3, r1			@ _end < entry
		addlo	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r11], #4		@ C references.
		cmp	r11, r12
		blo	1b
#endif
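
/*
 * Both GOT loops above, as C (a sketch; names are made up):
 *
 *	for (uint32_t *p = got_start; p < got_end; p++) {	// r11..r12
 *	#ifndef CONFIG_ZBOOT_ROM
 *		*p += delta;					// r0
 *	#else
 *		// only fix up entries outside the relocated BSS
 *		if (*p < bss_start || *p > bss_end)
 *			*p += delta;
 *	#endif
 *	}
 */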

not_relocated:	mov	r0, #0
1:		str	r0, [r2], #4		@ clear bss
		str	r0, [r2], #4
		str	r0, [r2], #4
		str	r0, [r2], #4
		cmp	r2, r3
		blo	1b

/*
 * The C runtime environment should now be set up sufficiently.
 * Set up some pointers, and start decompressing.
 *   r4  = kernel execution address
 *   r7  = architecture ID
 *   r8  = atags pointer
 */
		mov	r0, r4
		mov	r1, sp			@ malloc space above stack
		add	r2, sp, #0x10000	@ 64k max
		mov	r3, r7
		bl	decompress_kernel
		bl	cache_clean_flush
		bl	cache_off
		mov	r0, #0			@ must be zero
		mov	r1, r7			@ restore architecture number
		mov	r2, r8			@ restore atags pointer
		mov	pc, r4			@ call kernel

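/*
 * r0-r3 above are the arguments of the C entry point in misc.c; its
 * prototype is roughly the following (check misc.c for the
 * authoritative signature in this tree):
 *
 *	void decompress_kernel(unsigned long output_start,	// r0 = r4
 *			unsigned long free_mem_ptr_p,		// r1 = sp
 *			unsigned long free_mem_ptr_end_p,	// r2 = sp + 64k
 *			int arch_id);				// r3 = r7
 */
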
		.align	2
		.type	LC0, #object
LC0:		.word	LC0			@ r1
		.word	__bss_start		@ r2
		.word	_end			@ r3
		.word	_edata			@ r6
		.word	_image_size		@ r9
		.word	_got_start		@ r11
		.word	_got_end		@ ip
		.word	user_stack_end		@ sp
		.size	LC0, . - LC0

#ifdef CONFIG_ARCH_RPC
		.globl	params
params:		ldr	r0, =0x10000100		@ params_phys for RPC
		mov	pc, lr
		.ltorg
		.align
#endif

/*
 * Turn on the cache.  We need to set up some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * will go pop!
 *
 * On entry,
 *  r4 = kernel execution address
 *  r7 = architecture number
 *  r8 = atags pointer
 * On exit,
 *  r0, r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_on:	mov	r3, #8			@ cache_on function
		b	call_cache_fn

/*
 * Initialize the highest priority protection region, PR7
 * to cover all 32bit address and cacheable and bufferable.
 */
__armv4_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting
		mcr	p15, 0, r0, c6, c7, 1

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ D-cache on
		mcr	p15, 0, r0, c2, c0, 1	@ I-cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 1	@ I-access permission
		mcr	p15, 0, r0, c5, c0, 0	@ D-access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ ...I .... ..D. WC.M
		orr	r0, r0, #0x002d		@ .... .... ..1. 11.1
		orr	r0, r0, #0x1000		@ ...1 .... .... ....

		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mov	pc, lr

__armv3_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 0	@ access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		/*
		 * ?? ARMv3 MMU does not allow reading the control register,
		 * does this really work on ARMv3 MPU?
		 */
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ .... .... .... WC.M
		orr	r0, r0, #0x000d		@ .... .... .... 11.1
		/* ?? this overwrites the value constructed above? */
		mov	r0, #0
		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		/* ?? invalidate for the second time? */
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

__setup_mmu:	sub	r3, r4, #16384		@ Page directory size
		bic	r3, r3, #0xff		@ Align the pointer
		bic	r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
		mov	r0, r3
		mov	r9, r0, lsr #18
		mov	r9, r9, lsl #18		@ start of RAM
		add	r10, r9, #0x10000000	@ a reasonable RAM size
		mov	r1, #0x12
		orr	r1, r1, #3 << 10
		add	r2, r3, #16384
1:		cmp	r1, r9			@ if virt > start of RAM
		orrhs	r1, r1, #0x0c		@ set cacheable, bufferable
		cmp	r1, r10			@ if virt > end of RAM
		bichs	r1, r1, #0x0c		@ clear cacheable, bufferable
		str	r1, [r0], #4		@ 1:1 mapping
		add	r1, r1, #1048576
		teq	r0, r2
		bne	1b
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
		mov	r1, #0x1e
		orr	r1, r1, #3 << 10
		mov	r2, pc
		mov	r2, r2, lsr #20
		orr	r1, r1, r2, lsl #20
		add	r0, r3, r2, lsl #2
		str	r1, [r0], #4
		add	r1, r1, #1048576
		str	r1, [r0]
		mov	pc, lr
ENDPROC(__setup_mmu)

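/*
 * A C sketch of the first loop above (illustrative; names are made
 * up).  Each of the 4096 level-1 entries is a 1MB section descriptor
 * mapping virt == phys; only the 256MB window assumed to be RAM gets
 * the C and B (cacheable, bufferable) bits.  Like the asm, the sketch
 * compares the descriptor word itself, which is equivalent to
 * comparing the address because the flag bits sit far below the
 * 256KB-aligned RAM bounds:
 *
 *	uint32_t *pgd = page_dir;			// r3, 16K aligned
 *	uint32_t ram_start = page_dir & ~0x3ffff;	// r9
 *	uint32_t ram_end = ram_start + 0x10000000;	// r10: 256MB guess
 *	for (unsigned i = 0; i < 4096; i++) {
 *		uint32_t desc = (i << 20) | (3 << 10) | 0x12; // section
 *		if (desc >= ram_start && desc < ram_end)
 *			desc |= 0x0c;			// C + B
 *		*pgd++ = desc;
 *	}
 */
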
__armv4_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x0030
#ifdef CONFIG_CPU_ENDIAN_BE8
		orr	r0, r0, #1 << 25	@ big-endian page tables
#endif
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mov	pc, r12

__armv7_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
		tst	r11, #0xf		@ VMSA
		blne	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		tst	r11, #0xf		@ VMSA
		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x003c		@ write buffer
#ifdef CONFIG_MMU
#ifdef CONFIG_CPU_ENDIAN_BE8
		orr	r0, r0, #1 << 25	@ big-endian page tables
#endif
		orrne	r0, r0, #1		@ MMU enabled
		movne	r1, #-1
		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
#endif
		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back
		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

__fa526_cache_on:
		mov	r12, lr
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7, 0	@ Invalidate whole cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x1000		@ I-cache enable
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mov	pc, r12

__arm6_mmu_cache_on:
		mov	r12, lr
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mcr	p15, 0, r0, c5, c0, 0	@ invalidate whole TLB v3
		mov	r0, #0x30
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c5, c0, 0	@ invalidate whole TLB v3
		mov	pc, r12

__common_mmu_cache_on:
#ifndef CONFIG_THUMB2_KERNEL
#ifndef DEBUG
		orr	r0, r0, #0x000d		@ Write buffer, mmu
#endif
		mov	r1, #-1
		mcr	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcr	p15, 0, r1, c3, c0, 0	@ load domain access control
		b	1f
		.align	5			@ cache line aligned
1:		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back to
		sub	pc, lr, r0, lsr #32	@ properly flush pipeline
#endif

/*
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block.  Please note this is all position
 * independent code.
 *
 *  r1  = corrupted
 *  r2  = corrupted
 *  r3  = block offset
 *  r9  = corrupted
 *  r12 = corrupted
 */

call_cache_fn:	adr	r12, proc_types
#ifdef CONFIG_CPU_CP15
		mrc	p15, 0, r9, c0, c0	@ get processor ID
#else
		ldr	r9, =CONFIG_PROCESSOR_ID
#endif
1:		ldr	r1, [r12, #0]		@ get value
		ldr	r2, [r12, #4]		@ get mask
		eor	r1, r1, r9		@ (real ^ match)
		tst	r1, r2			@       & mask
 ARM(		addeq	pc, r12, r3		) @ call cache function
 THUMB(		addeq	r12, r3			)
 THUMB(		moveq	pc, r12			) @ call cache function
		add	r12, r12, #4*5
		b	1b

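/*
 * The dispatch above, as C (a sketch; the struct is invented).  Each
 * proc_types entry is five words: an ID value, an ID mask, then one
 * branch instruction each for the 'on' (+8), 'off' (+12) and 'flush'
 * (+16) methods; r3 carries that byte offset:
 *
 *	struct proc_type { uint32_t val, mask, fn[3]; };
 *	for (struct proc_type *p = proc_types; ; p++)
 *		if (((processor_id ^ p->val) & p->mask) == 0)
 *			goto *((char *)p + offset);	// offset = r3
 */
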
/*
 * Table for cache operations.  This is basically:
 *  - CPU ID match
 *  - CPU ID mask
 *  - 'cache on' method instruction
 *  - 'cache off' method instruction
 *  - 'cache flush' method instruction
 *
 * We match an entry using: ((real_id ^ match) & mask) == 0
 *
 * Writethrough caches generally only need 'on' and 'off'
 * methods.  Writeback caches _must_ have the flush method
 * defined.
 */
		.align	2
		.type	proc_types,#object
proc_types:
		.word	0x41560600		@ ARM6/610
		.word	0xffffffe0
		W(b)	__arm6_mmu_cache_off	@ works, but slow
		W(b)	__arm6_mmu_cache_off
		mov	pc, lr
 THUMB(		nop				)
@		b	__arm6_mmu_cache_on		@ untested
@		b	__arm6_mmu_cache_off
@		b	__armv3_mmu_cache_flush

		.word	0x00000000		@ old ARM ID
		.word	0x0000f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007000		@ ARM7/710
		.word	0xfff8fe00
		W(b)	__arm7_mmu_cache_off
		W(b)	__arm7_mmu_cache_off
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41807200		@ ARM720T (writethrough)
		.word	0xffffff00
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007400		@ ARM74x
		.word	0xff00ff00
		W(b)	__armv3_mpu_cache_on
		W(b)	__armv3_mpu_cache_off
		W(b)	__armv3_mpu_cache_flush

		.word	0x41009400		@ ARM94x
		.word	0xff00ff00
		W(b)	__armv4_mpu_cache_on
		W(b)	__armv4_mpu_cache_off
		W(b)	__armv4_mpu_cache_flush

		.word	0x00007000		@ ARM7 IDs
		.word	0x0000f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		@ Everything from here on will be the new ID system.

		.word	0x4401a100		@ sa110 / sa1100
		.word	0xffffffe0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x6901b110		@ sa1110
		.word	0xfffffff0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56056900
		.word	0xffffff00		@ PXA9xx
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56158000		@ PXA168
		.word	0xfffff000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x56050000		@ Feroceon
		.word	0xff0f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

#ifdef CONFIG_CPU_FEROCEON_OLD_ID
		/* this conflicts with the standard ARMv5TE entry */
		.long	0x41009260		@ Old Feroceon
		.long	0xff00fff0
		b	__armv4_mmu_cache_on
		b	__armv4_mmu_cache_off
		b	__armv5tej_mmu_cache_flush
#endif

		.word	0x66015261		@ FA526
		.word	0xff01fff1
		W(b)	__fa526_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__fa526_cache_flush

		@ These match on the architecture ID

		.word	0x00020000		@ ARMv4T
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00050000		@ ARMv5TE
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00060000		@ ARMv5TEJ
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x0007b000		@ ARMv6
		.word	0x000ff000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv6_mmu_cache_flush

		.word	0x560f5810		@ Marvell PJ4 ARMv6
		.word	0xff0ffff0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv6_mmu_cache_flush

		.word	0x000f0000		@ new CPU Id
		.word	0x000f0000
		W(b)	__armv7_mmu_cache_on
		W(b)	__armv7_mmu_cache_off
		W(b)	__armv7_mmu_cache_flush

		.word	0			@ unrecognised type
		.word	0
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.size	proc_types, . - proc_types

/*
 * Turn off the Cache and MMU.  ARMv3 does not support
 * reading the control register, but ARMv4 does.
 *
 * On exit,
 *  r0, r1, r2, r3, r9, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_off:	mov	r3, #12			@ cache_off function
		b	call_cache_fn

__armv4_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c6, 0	@ flush D-Cache
		mcr	p15, 0, r0, c7, c5, 0	@ flush I-Cache
		mov	pc, lr

__armv3_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0, 0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

__armv4_mmu_cache_off:
#ifdef CONFIG_MMU
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7	@ invalidate whole cache v4
		mcr	p15, 0, r0, c8, c7	@ invalidate whole TLB v4
#endif
		mov	pc, lr

__armv7_mmu_cache_off:
		mrc	p15, 0, r0, c1, c0
#ifdef CONFIG_MMU
		bic	r0, r0, #0x000d
#else
		bic	r0, r0, #0x000c
#endif
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r12, lr
		bl	__armv7_mmu_cache_flush
		mov	r0, #0
#ifdef CONFIG_MMU
		mcr	p15, 0, r0, c8, c7, 0	@ invalidate whole TLB
#endif
		mcr	p15, 0, r0, c7, c5, 6	@ invalidate BTC
		mcr	p15, 0, r0, c7, c10, 4	@ DSB
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

__arm6_mmu_cache_off:
		mov	r0, #0x00000030		@ ARM6 control reg.
		b	__armv3_mmu_cache_off

__arm7_mmu_cache_off:
		mov	r0, #0x00000070		@ ARM7 control reg.
		b	__armv3_mmu_cache_off

__armv3_mmu_cache_off:
		mcr	p15, 0, r0, c1, c0, 0	@ turn MMU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mcr	p15, 0, r0, c5, c0, 0	@ invalidate whole TLB v3
		mov	pc, lr

/*
 * Clean and flush the cache to maintain consistency.
 *
 * On exit,
 *  r1, r2, r3, r9, r10, r11, r12 corrupted
 * This routine must preserve:
 *  r4, r6, r7, r8
 */
		.align	5
cache_clean_flush:
		mov	r3, #16
		b	call_cache_fn

__armv4_mpu_cache_flush:
		mov	r2, #1
		mov	r3, #0
		mcr	p15, 0, ip, c7, c6, 0	@ invalidate D cache
		mov	r1, #7 << 5		@ 8 segments
1:		orr	r3, r1, #63 << 26	@ 64 entries
2:		mcr	p15, 0, r3, c7, c14, 2	@ clean & invalidate D index
		subs	r3, r3, #1 << 26
		bcs	2b			@ entries 63 to 0
		subs	r1, r1, #1 << 5
		bcs	1b			@ segments 7 to 0

		teq	r2, #0
		mcrne	p15, 0, ip, c7, c5, 0	@ invalidate I cache
		mcr	p15, 0, ip, c7, c10, 4	@ drain WB
		mov	pc, lr

__fa526_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean and invalidate D cache
		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv6_mmu_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
		mcr	p15, 0, r1, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv7_mmu_cache_flush:
		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
		tst	r10, #0xf << 16		@ hierarchical cache (ARMv7)
		mov	r10, #0
		beq	hierarchical
		mcr	p15, 0, r10, c7, c14, 0	@ clean+invalidate D
		b	iflush
hierarchical:
		mcr	p15, 0, r10, c7, c10, 5	@ DMB
		stmfd	sp!, {r0-r7, r9-r11}
		mrc	p15, 1, r0, c0, c0, 1	@ read clidr
		ands	r3, r0, #0x7000000	@ extract loc from clidr
		mov	r3, r3, lsr #23		@ left align loc bit field
		beq	finished		@ if loc is 0, then no need to clean
		mov	r10, #0			@ start clean at cache level 0
loop1:
		add	r2, r10, r10, lsr #1	@ work out 3x current cache level
		mov	r1, r0, lsr r2		@ extract cache type bits from clidr
		and	r1, r1, #7		@ mask of the bits for current cache only
		cmp	r1, #2			@ see what cache we have at this level
		blt	skip			@ skip if no cache, or just i-cache
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
		mcr	p15, 0, r10, c7, c5, 4	@ isb to sync the new cssr&csidr
		mrc	p15, 1, r1, c0, c0, 0	@ read the new csidr
		and	r2, r1, #7		@ extract the length of the cache lines
		add	r2, r2, #4		@ add 4 (line length offset)
		ldr	r4, =0x3ff
		ands	r4, r4, r1, lsr #3	@ find maximum number of the way size
		clz	r5, r4			@ find bit position of way size increment
		ldr	r7, =0x7fff
		ands	r7, r7, r1, lsr #13	@ extract max number of the index size
loop2:
		mov	r9, r4			@ create working copy of max way size
loop3:
 ARM(		orr	r11, r10, r9, lsl r5	) @ factor way and cache number into r11
 ARM(		orr	r11, r11, r7, lsl r2	) @ factor index number into r11
 THUMB(		lsl	r6, r9, r5		)
 THUMB(		orr	r11, r10, r6		) @ factor way and cache number into r11
 THUMB(		lsl	r6, r7, r2		)
 THUMB(		orr	r11, r11, r6		) @ factor index number into r11
		mcr	p15, 0, r11, c7, c14, 2	@ clean & invalidate by set/way
		subs	r9, r9, #1		@ decrement the way
		bge	loop3
		subs	r7, r7, #1		@ decrement the index
		bge	loop2
skip:
		add	r10, r10, #2		@ increment cache number
		cmp	r3, r10
		bgt	loop1
finished:
		ldmfd	sp!, {r0-r7, r9-r11}
		mov	r10, #0			@ switch back to cache level 0
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
iflush:
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 4	@ ISB
		mov	pc, lr

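/*
 * The hierarchical path above is the standard ARMv7 clean+invalidate
 * by set/way walk over CLIDR/CCSIDR.  Roughly, in C (a sketch; every
 * helper name here is invented):
 *
 *	uint32_t clidr = read_clidr();
 *	for (int level = 0; level < loc(clidr); level++) {
 *		if (ctype(clidr, level) < 2)
 *			continue;		// no cache or I-cache only
 *		write_csselr(level << 1);	// select level, then ISB
 *		uint32_t ccsidr = read_ccsidr();
 *		for (int way = max_way(ccsidr); way >= 0; way--)
 *			for (int set = max_set(ccsidr); set >= 0; set--)
 *				dccisw(encode(level, way, set));
 *	}
 */
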
__armv5tej_mmu_cache_flush:
1:		mrc	p15, 0, r15, c7, c14, 3	@ test,clean,invalidate D cache
		bne	1b
		mcr	p15, 0, r0, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv4_mmu_cache_flush:
		mov	r2, #64*1024		@ default: 32K dcache size (*2)
		mov	r11, #32		@ default: 32 byte line size
		mrc	p15, 0, r3, c0, c0, 1	@ read cache type
		teq	r3, r9			@ cache ID register present?
		beq	no_cache_id
		mov	r1, r3, lsr #18
		and	r1, r1, #7
		mov	r2, #1024
		mov	r2, r2, lsl r1		@ base dcache size *2
		tst	r3, #1 << 14		@ test M bit
		addne	r2, r2, r2, lsr #1	@ +1/2 size if M == 1
		mov	r3, r3, lsr #12
		and	r3, r3, #3
		mov	r11, #8
		mov	r11, r11, lsl r3	@ cache line size in bytes
no_cache_id:
		mov	r1, pc
		bic	r1, r1, #63		@ align to longest cache line
		add	r2, r1, r2
1:
 ARM(		ldr	r3, [r1], r11		) @ s/w flush D cache
 THUMB(		ldr	r3, [r1]		) @ s/w flush D cache
 THUMB(		add	r1, r1, r11		)
		teq	r1, r2
		bne	1b

		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c6, 0	@ flush D cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

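/*
 * The cache-type decode above, as C (a sketch; names are made up):
 *
 *	uint32_t ctr = read_cache_type_reg();
 *	uint32_t sweep = 1024 << ((ctr >> 18) & 7);	// 2x D-cache size
 *	if (ctr & (1 << 14))				// M bit
 *		sweep += sweep / 2;			// +50%
 *	uint32_t line = 8 << ((ctr >> 12) & 3);		// line size, bytes
 *
 * The loop then loads one word per cache line across 'sweep' bytes
 * starting at the (aligned) current pc, forcing every dirty D-cache
 * line to be evicted and written back.
 */
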
__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

/*
 * Various debugging routines for printing hex characters and
 * memory, which again must be relocatable.
 */
#ifdef DEBUG
		.align	2
		.type	phexbuf,#object
phexbuf:	.space	12
		.size	phexbuf, . - phexbuf

@ phex corrupts {r0, r1, r2, r3}
phex:		adr	r3, phexbuf
		mov	r2, #0
		strb	r2, [r3, r1]
1:		subs	r1, r1, #1
		movmi	r0, r3
		bmi	puts
		and	r2, r0, #15
		mov	r0, r0, lsr #4
		cmp	r2, #10
		addge	r2, r2, #7
		add	r2, r2, #'0'
		strb	r2, [r3, r1]
		b	1b

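/*
 * phex builds the hex string right-to-left in phexbuf and then falls
 * through to puts.  In C, roughly (a sketch):
 *
 *	buf[len] = '\0';
 *	for (int i = len - 1; i >= 0; i--, val >>= 4) {
 *		int d = val & 15;
 *		buf[i] = d + (d >= 10 ? 'A' - 10 : '0');
 *	}
 *	puts(buf);
 */
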
@ puts corrupts {r0, r1, r2, r3}
puts:		loadsp	r3, r1
1:		ldrb	r2, [r0], #1
		teq	r2, #0
		moveq	pc, lr
2:		writeb	r2, r3
		mov	r1, #0x00020000
3:		subs	r1, r1, #1
		bne	3b
		teq	r2, #'\n'
		moveq	r2, #'\r'
		beq	2b
		teq	r0, #0
		bne	1b
		mov	pc, lr
@ putc corrupts {r0, r1, r2, r3}
putc:
		mov	r2, r0
		mov	r0, #0
		loadsp	r3, r1
		b	2b

@ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
memdump:	mov	r12, r0
		mov	r10, lr
		mov	r11, #0
2:		mov	r0, r11, lsl #2
		add	r0, r0, r12
		mov	r1, #8
		bl	phex
		mov	r0, #':'
		bl	putc
1:		mov	r0, #' '
		bl	putc
		ldr	r0, [r12, r11, lsl #2]
		mov	r1, #8
		bl	phex
		and	r0, r11, #7
		teq	r0, #3
		moveq	r0, #' '
		bleq	putc
		and	r0, r11, #7
		add	r11, r11, #1
		teq	r0, #7
		bne	1b
		mov	r0, #'\n'
		bl	putc
		cmp	r11, #64
		blt	2b
		mov	pc, r10
#endif

		.ltorg
reloc_code_end:

		.align
		.section ".stack", "aw", %nobits
user_stack:	.space	4096
user_stack_end: