/*
 *  linux/arch/arm/boot/compressed/head.S
 *
 *  Copyright (C) 1996-2002 Russell King
 *  Copyright (C) 2004 Hyok S. Choi (MPU support)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

	.arch	armv7-a
/*
 * Debugging stuff
 *
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */
#ifdef DEBUG

#if defined(CONFIG_DEBUG_ICEDCC)

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c0, c5, 0
		.endm
#elif defined(CONFIG_CPU_XSCALE)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c8, c0, 0
		.endm
#else
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c1, c0, 0
		.endm
#endif

#else

#include CONFIG_DEBUG_LL_INCLUDE

		.macro	writeb, ch, rb
		senduart \ch, \rb
		.endm

#if defined(CONFIG_ARCH_SA1100)
		.macro	loadsp, rb, tmp
		mov	\rb, #0x80000000	@ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
		add	\rb, \rb, #0x00050000	@ Ser3
#else
		add	\rb, \rb, #0x00010000	@ Ser1
#endif
		.endm
#elif defined(CONFIG_ARCH_S3C24XX)
		.macro	loadsp, rb, tmp
		mov	\rb, #0x50000000
		add	\rb, \rb, #0x4000 * CONFIG_S3C_LOWLEVEL_UART_PORT
		.endm
#else
		.macro	loadsp, rb, tmp
		addruart \rb, \tmp
		.endm
#endif
#endif
#endif

		.macro	kputc,val
		mov	r0, \val
		bl	putc
		.endm

		.macro	kphex,val,len
		mov	r0, \val
		mov	r1, #\len
		bl	phex
		.endm

		.macro	debug_reloc_start
#ifdef DEBUG
		kputc	#'\n'
		kphex	r6, 8		/* processor id */
		kputc	#':'
		kphex	r7, 8		/* architecture id */
#ifdef CONFIG_CPU_CP15
		kputc	#':'
		mrc	p15, 0, r0, c1, c0
		kphex	r0, 8		/* control reg */
#endif
		kputc	#'\n'
		kphex	r5, 8		/* decompressed kernel start */
		kputc	#'-'
		kphex	r9, 8		/* decompressed kernel end */
		kputc	#'>'
		kphex	r4, 8		/* kernel execution address */
		kputc	#'\n'
#endif
		.endm

		.macro	debug_reloc_end
#ifdef DEBUG
		kphex	r5, 8		/* end of kernel */
		kputc	#'\n'
		mov	r0, r4
		bl	memdump		/* dump 256 bytes at start of kernel */
#endif
		.endm

		.section ".start", #alloc, #execinstr
/*
 * sort out different calling conventions
 */
		.align
		.arm				@ Always enter in ARM state
start:
		.type	start,#function
		.rept	7
		mov	r0, r0
		.endr
 ARM(		mov	r0, r0		)
 ARM(		b	1f		)
 THUMB(		adr	r12, BSYM(1f)	)
 THUMB(		bx	r12		)

		.word	0x016f2818		@ Magic numbers to help the loader
		.word	start			@ absolute load/run zImage address
		.word	_edata			@ zImage end address
 THUMB(		.thumb			)
1:
		mrs	r9, cpsr
#ifdef CONFIG_ARM_VIRT_EXT
		bl	__hyp_stub_install	@ get into SVC mode, reversibly
#endif
		mov	r7, r1			@ save architecture ID
		mov	r8, r2			@ save atags pointer

#ifndef __ARM_ARCH_2__
		/*
		 * Booting from Angel - need to enter SVC mode and disable
		 * FIQs/IRQs (numeric definitions from angel arm.h source).
		 * We only do this if we were in user mode on entry.
		 */
		mrs	r2, cpsr		@ get current mode
		tst	r2, #3			@ not user?
		bne	not_angel
		mov	r0, #0x17		@ angel_SWIreason_EnterSVC
 ARM(		swi	0x123456	)	@ angel_SWI_ARM
 THUMB(		svc	0xab		)	@ angel_SWI_THUMB
not_angel:
		safe_svcmode_maskall r0
		msr	spsr_cxsf, r9		@ Save the CPU boot mode in
						@ SPSR
#else
		teqp	pc, #0x0c000003		@ turn off interrupts
#endif

		/*
		 * Note that some cache flushing and other stuff may
		 * be needed here - is there an Angel SWI call for this?
		 */

		/*
		 * some architecture specific code can be inserted
		 * by the linker here, but it should preserve r7, r8, and r9.
		 */

		.text

#ifdef CONFIG_AUTO_ZRELADDR
		@ determine final kernel image address
		mov	r4, pc
		and	r4, r4, #0xf8000000
		add	r4, r4, #TEXT_OFFSET
#else
		ldr	r4, =zreladdr
#endif

		bl	cache_on

restart:	adr	r0, LC0
		ldmia	r0, {r1, r2, r3, r6, r10, r11, r12}
		ldr	sp, [r0, #28]

		/*
		 * We might be running at a different address.  We need
		 * to fix up various pointers.
		 */
		sub	r0, r0, r1		@ calculate the delta offset
		add	r6, r6, r0		@ _edata
		add	r10, r10, r0		@ inflated kernel size location

		/*
		 * The kernel build system appends the size of the
		 * decompressed kernel at the end of the compressed data
		 * in little-endian form.
		 */
		ldrb	r9, [r10, #0]
		ldrb	lr, [r10, #1]
		orr	r9, r9, lr, lsl #8
		ldrb	lr, [r10, #2]
		ldrb	r10, [r10, #3]
		orr	r9, r9, lr, lsl #16
		orr	r9, r9, r10, lsl #24
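		/*
		 * The size word is assembled a byte at a time above so the
		 * little-endian value is read correctly even on a big-endian
		 * CPU, and so an unaligned placement of the size word (the
		 * compressed data can end at any offset) cannot fault.
		 */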

#ifndef CONFIG_ZBOOT_ROM
		/* malloc space is above the relocated stack (64k max) */
		add	sp, sp, r0
		add	r10, sp, #0x10000
#else
		/*
		 * With ZBOOT_ROM the bss/stack is non-relocatable,
		 * but someone could still run this code from RAM,
		 * in which case our reference is _edata.
		 */
		mov	r10, r6
#endif

		mov	r5, #0			@ init dtb size to 0
#ifdef CONFIG_ARM_APPENDED_DTB
/*
 * r0  = delta
 * r2  = BSS start
 * r3  = BSS end
 * r4  = final kernel address
 * r5  = appended dtb size (still unknown)
 * r6  = _edata
 * r7  = architecture ID
 * r8  = atags/device tree pointer
 * r9  = size of decompressed image
 * r10 = end of this image, including bss/stack/malloc space if non XIP
 * r11 = GOT start
 * r12 = GOT end
 * sp  = stack pointer
 *
 * if there are device trees (dtb) appended to zImage, advance r10 so that the
 * dtb data will get relocated along with the kernel if necessary.
 */

		ldr	lr, [r6, #0]
#ifndef __ARMEB__
		ldr	r1, =0xedfe0dd0		@ sig is 0xd00dfeed big endian
#else
		ldr	r1, =0xd00dfeed
#endif
		cmp	lr, r1
		bne	dtb_check_done		@ not found

#ifdef CONFIG_ARM_ATAG_DTB_COMPAT
		/*
		 * OK... Let's do some funky business here.
		 * If we do have a DTB appended to zImage, and we do have
		 * an ATAG list around, we want the latter to be translated
		 * and folded into the former here.  To be on the safe side,
		 * let's temporarily move the stack away into the malloc
		 * area.  No GOT fixup has occurred yet, but none of the
		 * code we're about to call uses any global variable.
		 */
		add	sp, sp, #0x10000
		stmfd	sp!, {r0-r3, ip, lr}
		mov	r0, r8
		mov	r1, r6
		sub	r2, sp, r6
		bl	atags_to_fdt

		/*
		 * If the returned value is 1, there is no ATAG at the
		 * location pointed to by r8.  Try the typical 0x100 offset
		 * from start of RAM and hope for the best.
		 */
		cmp	r0, #1
		sub	r0, r4, #TEXT_OFFSET
		add	r0, r0, #0x100
		mov	r1, r6
		sub	r2, sp, r6
		bleq	atags_to_fdt
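		/*
		 * The eq condition on the bleq above still tests the result
		 * of the "cmp r0, #1": none of the intervening sub/add/mov
		 * instructions set the condition flags.
		 */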

		ldmfd	sp!, {r0-r3, ip, lr}
		sub	sp, sp, #0x10000
#endif

		mov	r8, r6			@ use the appended device tree

		/*
		 * Make sure that the DTB doesn't end up in the final
		 * kernel's .bss area.  To do so, we adjust the decompressed
		 * kernel size to compensate if that .bss size is larger
		 * than the relocated code.
		 */
		ldr	r5, =_kernel_bss_size
		adr	r1, wont_overwrite
		sub	r1, r6, r1
		subs	r1, r5, r1
		addhi	r9, r9, r1

		/* Get the dtb's size */
		ldr	r5, [r6, #4]
#ifndef __ARMEB__
		/* convert r5 (dtb size) to little endian */
		eor	r1, r5, r5, ror #16
		bic	r1, r1, #0x00ff0000
		mov	r5, r5, ror #8
		eor	r5, r5, r1, lsr #8
#endif
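		/*
		 * The four instructions above are the classic pre-ARMv6 byte
		 * swap (this path must run without the v6 "rev" instruction).
		 * E.g. with r5 = 0xAABBCCDD:
		 *   r1 = r5 ^ (r5 ror #16)  = 0x66666666
		 *   r1 &= ~0x00ff0000       = 0x66006666
		 *   r5 = r5 ror #8          = 0xDDAABBCC
		 *   r5 ^= (r1 lsr #8)       = 0xDDCCBBAA
		 */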

		/* preserve 64-bit alignment */
		add	r5, r5, #7
		bic	r5, r5, #7

		/* relocate some pointers past the appended dtb */
		add	r6, r6, r5
		add	r10, r10, r5
		add	sp, sp, r5
dtb_check_done:
#endif

/*
 * Check to see if we will overwrite ourselves.
 *   r4  = final kernel address
 *   r9  = size of decompressed image
 *   r10 = end of this image, including bss/stack/malloc space if non XIP
 * We basically want:
 *   r4 - 16k page directory >= r10 -> OK
 *   r4 + image length <= address of wont_overwrite -> OK
 */
		add	r10, r10, #16384
		cmp	r4, r10
		bhs	wont_overwrite
		add	r10, r4, r9
		adr	r9, wont_overwrite
		cmp	r10, r9
		bls	wont_overwrite

/*
 * Relocate ourselves past the end of the decompressed kernel.
 *   r6  = _edata
 *   r10 = end of the decompressed kernel
 * Because we always copy ahead, we need to do it from the end and go
 * backward in case the source and destination overlap.
 */
		/*
		 * Bump to the next 256-byte boundary with the size of
		 * the relocation code added.  This avoids overwriting
		 * ourselves when the offset is small.
		 */
		add	r10, r10, #((reloc_code_end - restart + 256) & ~255)
		bic	r10, r10, #255

		/* Get start of code we want to copy and align it down. */
		adr	r5, restart
		bic	r5, r5, #31

/* Relocate the hyp vector base if necessary */
#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE
		bne	1f

		bl	__hyp_get_vectors
		sub	r0, r0, r5
		add	r0, r0, r10
		bl	__hyp_set_vectors
1:
#endif

		sub	r9, r6, r5		@ size to copy
		add	r9, r9, #31		@ rounded up to a multiple
		bic	r9, r9, #31		@ ... of 32 bytes
		add	r6, r9, r5
		add	r9, r9, r10

1:		ldmdb	r6!, {r0 - r3, r10 - r12, lr}
		cmp	r6, r5
		stmdb	r9!, {r0 - r3, r10 - r12, lr}
		bhi	1b
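		/*
		 * The loop above moves 32 bytes (8 registers) per iteration,
		 * descending from _edata towards the start of the code, so a
		 * destination that overlaps the source from above is never
		 * written before it has been read.
		 */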

		/* Preserve offset to relocated code. */
		sub	r6, r9, r6

#ifndef CONFIG_ZBOOT_ROM
		/* cache_clean_flush may use the stack, so relocate it */
		add	sp, sp, r6
#endif

		bl	cache_clean_flush

		adr	r0, BSYM(restart)
		add	r0, r0, r6
		mov	pc, r0

wont_overwrite:
/*
 * If delta is zero, we are running at the address we were linked at.
 *   r0  = delta
 *   r2  = BSS start
 *   r3  = BSS end
 *   r4  = kernel execution address
 *   r5  = appended dtb size (0 if not present)
 *   r7  = architecture ID
 *   r8  = atags pointer
 *   r11 = GOT start
 *   r12 = GOT end
 *   sp  = stack pointer
 */
		orrs	r1, r0, r5
		beq	not_relocated

		add	r11, r11, r0
		add	r12, r12, r0

#ifndef CONFIG_ZBOOT_ROM
		/*
		 * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
		 * we need to fix up pointers into the BSS region.
		 * Note that the stack pointer has already been fixed up.
		 */
		add	r2, r2, r0
		add	r3, r3, r0

		/*
		 * Relocate all entries in the GOT table.
		 * Bump bss entries to _edata + dtb size
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		add	r1, r1, r0		@ This fixes up C references
		cmp	r1, r2			@ if entry >= bss_start &&
		cmphs	r3, r1			@     bss_end > entry
		addhi	r1, r1, r5		@    entry += dtb size
		str	r1, [r11], #4		@ next entry
		cmp	r11, r12
		blo	1b
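		/*
		 * The cmp/cmphs/addhi triplet above is a conditional range
		 * check: addhi only executes when bss_start <= entry &&
		 * entry < bss_end, i.e. for GOT entries pointing into the
		 * BSS, which must additionally be moved up by the dtb size.
		 */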

		/* bump our bss pointers too */
		add	r2, r2, r5
		add	r3, r3, r5

#else

		/*
		 * Relocate entries in the GOT table.  We only relocate
		 * the entries that are outside the (relocated) BSS region.
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		cmp	r1, r2			@ entry < bss_start ||
		cmphs	r3, r1			@ _end < entry
		addlo	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r11], #4		@ C references.
		cmp	r11, r12
		blo	1b
#endif

not_relocated:	mov	r0, #0
1:		str	r0, [r2], #4		@ clear bss
		str	r0, [r2], #4
		str	r0, [r2], #4
		str	r0, [r2], #4
		cmp	r2, r3
		blo	1b

/*
 * The C runtime environment should now be set up sufficiently.
 * Set up some pointers, and start decompressing.
 *   r4 = kernel execution address
 *   r7 = architecture ID
 *   r8 = atags pointer
 */
		mov	r0, r4
		mov	r1, sp			@ malloc space above stack
		add	r2, sp, #0x10000	@ 64k max
		mov	r3, r7
		bl	decompress_kernel
		bl	cache_clean_flush
		bl	cache_off
		mov	r1, r7			@ restore architecture number
		mov	r2, r8			@ restore atags pointer

#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr		@ Get saved CPU boot mode
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE		@ if not booted in HYP mode...
		bne	__enter_kernel		@ boot kernel directly

		adr	r12, .L__hyp_reentry_vectors_offset
		ldr	r0, [r12]
		add	r0, r0, r12

		bl	__hyp_set_vectors
		__HVC(0)			@ otherwise bounce to hyp mode

		b	.			@ should never be reached

		.align	2
.L__hyp_reentry_vectors_offset:	.long	__hyp_reentry_vectors - .
#else
		b	__enter_kernel
#endif

		.align	2
		.type	LC0, #object
LC0:		.word	LC0			@ r1
		.word	__bss_start		@ r2
		.word	_end			@ r3
		.word	_edata			@ r6
		.word	input_data_end - 4	@ r10 (inflated size location)
		.word	_got_start		@ r11
		.word	_got_end		@ ip
		.word	.L_user_stack_end	@ sp
		.size	LC0, . - LC0

#ifdef CONFIG_ARCH_RPC
		.globl	params
params:		ldr	r0, =0x10000100		@ params_phys for RPC
		mov	pc, lr
		.ltorg
		.align
#endif

/*
 * Turn on the cache.  We need to setup some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * will go pop!
 *
 * On entry,
 *  r4 = kernel execution address
 *  r7 = architecture number
 *  r8 = atags pointer
 * On exit,
 *  r0, r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_on:	mov	r3, #8			@ cache_on function
		b	call_cache_fn

/*
 * Initialize the highest priority protection region, PR7,
 * to cover the whole 32-bit address space, cacheable and bufferable.
 */
__armv4_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting
		mcr	p15, 0, r0, c6, c7, 1

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ D-cache on
		mcr	p15, 0, r0, c2, c0, 1	@ I-cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 1	@ I-access permission
		mcr	p15, 0, r0, c5, c0, 0	@ D-access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ ...I .... ..D. WC.M
		orr	r0, r0, #0x002d		@ .... .... ..1. 11.1
		orr	r0, r0, #0x1000		@ ...1 .... .... ....

		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mov	pc, lr

__armv3_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 0	@ access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		/*
		 * ?? ARMv3 MMU does not allow reading the control register,
		 * does this really work on ARMv3 MPU?
		 */
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ .... .... .... WC.M
		orr	r0, r0, #0x000d		@ .... .... .... 11.1
		/* ?? this overwrites the value constructed above? */
		mov	r0, #0
		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		/* ?? invalidate for the second time? */
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define CB_BITS 0x08
#else
#define CB_BITS 0x0c
#endif

__setup_mmu:	sub	r3, r4, #16384		@ Page directory size
		bic	r3, r3, #0xff		@ Align the pointer
		bic	r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
		mov	r0, r3
		mov	r9, r0, lsr #18
		mov	r9, r9, lsl #18		@ start of RAM
		add	r10, r9, #0x10000000	@ a reasonable RAM size
		mov	r1, #0x12		@ XN|U + section mapping
		orr	r1, r1, #3 << 10	@ AP=11
		add	r2, r3, #16384
1:		cmp	r1, r9			@ if virt > start of RAM
		cmphs	r10, r1			@   && end of RAM > virt
		bic	r1, r1, #0x1c		@ clear XN|U + C + B
		orrlo	r1, r1, #0x10		@ Set XN|U for non-RAM
		orrhs	r1, r1, r6		@ set RAM section settings
		str	r1, [r0], #4		@ 1:1 mapping
		add	r1, r1, #1048576
		teq	r0, r2
		bne	1b
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
		orr	r1, r6, #0x04		@ ensure B is set for this
		orr	r1, r1, #3 << 10
		mov	r2, pc
		mov	r2, r2, lsr #20
		orr	r1, r1, r2, lsl #20
		add	r0, r3, r2, lsl #2
		str	r1, [r0], #4
		add	r1, r1, #1048576
		str	r1, [r0]
		mov	pc, lr
ENDPROC(__setup_mmu)

@ Enable unaligned access on v6, to allow better code generation
@ for the decompressor C code:
__armv6_mmu_cache_on:
		mrc	p15, 0, r0, c1, c0, 0	@ read SCTLR
		bic	r0, r0, #2		@ A (no unaligned access fault)
		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
		mcr	p15, 0, r0, c1, c0, 0	@ write SCTLR
		b	__armv4_mmu_cache_on

__arm926ejs_mmu_cache_on:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
		mov	r0, #4			@ put dcache in WT mode
		mcr	p15, 7, r0, c15, c0, 0
#endif

__armv4_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x0030
#ifdef CONFIG_CPU_ENDIAN_BE8
		orr	r0, r0, #1 << 25	@ big-endian page tables
#endif
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mov	pc, r12

__armv7_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
		tst	r11, #0xf		@ VMSA
		movne	r6, #CB_BITS | 0x02	@ !XN
		blne	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		tst	r11, #0xf		@ VMSA
		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		bic	r0, r0, #1 << 28	@ clear SCTLR.TRE
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x003c		@ write buffer
		bic	r0, r0, #2		@ A (no unaligned access fault)
		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
						@ (needed for ARM1176)
#ifdef CONFIG_MMU
#ifdef CONFIG_CPU_ENDIAN_BE8
		orr	r0, r0, #1 << 25	@ big-endian page tables
#endif
		mrcne	p15, 0, r6, c2, c0, 2	@ read ttb control reg
		orrne	r0, r0, #1		@ MMU enabled
		movne	r1, #0xfffffffd		@ domain 0 = client
		bic	r6, r6, #1 << 31	@ 32-bit translation system
		bic	r6, r6, #3 << 0		@ use only ttbr0
		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
		mcrne	p15, 0, r6, c2, c0, 2	@ load ttb control
#endif
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back
		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

__fa526_cache_on:
		mov	r12, lr
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7, 0	@ Invalidate whole cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x1000		@ I-cache enable
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mov	pc, r12

__common_mmu_cache_on:
#ifndef CONFIG_THUMB2_KERNEL
#ifndef DEBUG
		orr	r0, r0, #0x000d		@ Write buffer, mmu
#endif
		mov	r1, #-1
		mcr	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcr	p15, 0, r1, c3, c0, 0	@ load domain access control
		b	1f
		.align	5			@ cache line aligned
1:		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back to
		sub	pc, lr, r0, lsr #32	@ properly flush pipeline
#endif
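/*
 * Note on the return above: "r0, lsr #32" always evaluates to zero, so
 * "sub pc, lr, r0, lsr #32" is simply a return to lr -- but one whose
 * operand depends on the mrc result, forcing the control register
 * write to take effect before the pipeline continues.
 */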

#define PROC_ENTRY_SIZE (4*5)

/*
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block.  Please note this is all position
 * independent code.
 *
 *  r1  = corrupted
 *  r2  = corrupted
 *  r3  = block offset
 *  r9  = corrupted
 *  r12 = corrupted
 */

call_cache_fn:	adr	r12, proc_types
#ifdef CONFIG_CPU_CP15
		mrc	p15, 0, r9, c0, c0	@ get processor ID
#else
		ldr	r9, =CONFIG_PROCESSOR_ID
#endif
1:		ldr	r1, [r12, #0]		@ get value
		ldr	r2, [r12, #4]		@ get mask
		eor	r1, r1, r9		@ (real ^ match)
		tst	r1, r2			@      & mask
 ARM(		addeq	pc, r12, r3	) @ call cache function
 THUMB(		addeq	r12, r3		)
 THUMB(		moveq	pc, r12		) @ call cache function
		add	r12, r12, #PROC_ENTRY_SIZE
		b	1b

/*
 * Table for cache operations.  This is basically:
 *  - CPU ID match
 *  - CPU ID mask
 *  - 'cache on' method instruction
 *  - 'cache off' method instruction
 *  - 'cache flush' method instruction
 *
 * We match an entry using: ((real_id ^ match) & mask) == 0
 *
 * Writethrough caches generally only need 'on' and 'off'
 * methods.  Writeback caches _must_ have the flush method
 * defined.
 */
		.align	2
		.type	proc_types,#object
proc_types:
		.word	0x41000000		@ old ARM ID
		.word	0xff00f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007000		@ ARM7/710
		.word	0xfff8fe00
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41807200		@ ARM720T (writethrough)
		.word	0xffffff00
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007400		@ ARM74x
		.word	0xff00ff00
		W(b)	__armv3_mpu_cache_on
		W(b)	__armv3_mpu_cache_off
		W(b)	__armv3_mpu_cache_flush

		.word	0x41009400		@ ARM94x
		.word	0xff00ff00
		W(b)	__armv4_mpu_cache_on
		W(b)	__armv4_mpu_cache_off
		W(b)	__armv4_mpu_cache_flush

		.word	0x41069260		@ ARM926EJ-S (v5TEJ)
		.word	0xff0ffff0
		W(b)	__arm926ejs_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x00007000		@ ARM7 IDs
		.word	0x0000f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		@ Everything from here on will be the new ID system.

		.word	0x4401a100		@ sa110 / sa1100
		.word	0xffffffe0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x6901b110		@ sa1110
		.word	0xfffffff0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56056900
		.word	0xffffff00		@ PXA9xx
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56158000		@ PXA168
		.word	0xfffff000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x56050000		@ Feroceon
		.word	0xff0f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

#ifdef CONFIG_CPU_FEROCEON_OLD_ID
		/* this conflicts with the standard ARMv5TE entry */
		.long	0x41009260		@ Old Feroceon
		.long	0xff00fff0
		b	__armv4_mmu_cache_on
		b	__armv4_mmu_cache_off
		b	__armv5tej_mmu_cache_flush
#endif

		.word	0x66015261		@ FA526
		.word	0xff01fff1
		W(b)	__fa526_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__fa526_cache_flush

		@ These match on the architecture ID

		.word	0x00020000		@ ARMv4T
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00050000		@ ARMv5TE
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00060000		@ ARMv5TEJ
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x0007b000		@ ARMv6
		.word	0x000ff000
		W(b)	__armv6_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv6_mmu_cache_flush

		.word	0x000f0000		@ new CPU Id
		.word	0x000f0000
		W(b)	__armv7_mmu_cache_on
		W(b)	__armv7_mmu_cache_off
		W(b)	__armv7_mmu_cache_flush

		.word	0			@ unrecognised type
		.word	0
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.size	proc_types, . - proc_types

		/*
		 * If you get a "non-constant expression in ".if" statement"
		 * error from the assembler on this line, check that you have
		 * not accidentally written a "b" instruction where you should
		 * have written W(b).
		 */
		.if (. - proc_types) % PROC_ENTRY_SIZE != 0
		.error "The size of one or more proc_types entries is wrong."
		.endif

/*
 * Turn off the Cache and MMU.  ARMv3 does not support
 * reading the control register, but ARMv4 does.
 *
 * On exit,
 *  r0, r1, r2, r3, r9, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_off:	mov	r3, #12			@ cache_off function
		b	call_cache_fn

__armv4_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c6, 0	@ flush D-Cache
		mcr	p15, 0, r0, c7, c5, 0	@ flush I-Cache
		mov	pc, lr

__armv3_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0, 0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

__armv4_mmu_cache_off:
#ifdef CONFIG_MMU
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7	@ invalidate whole cache v4
		mcr	p15, 0, r0, c8, c7	@ invalidate whole TLB v4
#endif
		mov	pc, lr

__armv7_mmu_cache_off:
		mrc	p15, 0, r0, c1, c0
#ifdef CONFIG_MMU
		bic	r0, r0, #0x000d
#else
		bic	r0, r0, #0x000c
#endif
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r12, lr
		bl	__armv7_mmu_cache_flush
		mov	r0, #0
#ifdef CONFIG_MMU
		mcr	p15, 0, r0, c8, c7, 0	@ invalidate whole TLB
#endif
		mcr	p15, 0, r0, c7, c5, 6	@ invalidate BTC
		mcr	p15, 0, r0, c7, c10, 4	@ DSB
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

/*
 * Clean and flush the cache to maintain consistency.
 *
 * On exit,
 *  r1, r2, r3, r9, r10, r11, r12 corrupted
 * This routine must preserve:
 *  r4, r6, r7, r8
 */
		.align	5
cache_clean_flush:
		mov	r3, #16
		b	call_cache_fn

__armv4_mpu_cache_flush:
		mov	r2, #1
		mov	r3, #0
		mcr	p15, 0, ip, c7, c6, 0	@ invalidate D cache
		mov	r1, #7 << 5		@ 8 segments
1:		orr	r3, r1, #63 << 26	@ 64 entries
2:		mcr	p15, 0, r3, c7, c14, 2	@ clean & invalidate D index
		subs	r3, r3, #1 << 26
		bcs	2b			@ entries 63 to 0
		subs	r1, r1, #1 << 5
		bcs	1b			@ segments 7 to 0

		teq	r2, #0
		mcrne	p15, 0, ip, c7, c5, 0	@ invalidate I cache
		mcr	p15, 0, ip, c7, c10, 4	@ drain WB
		mov	pc, lr

__fa526_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean and invalidate D cache
		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv6_mmu_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
		mcr	p15, 0, r1, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv7_mmu_cache_flush:
		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
		tst	r10, #0xf << 16		@ hierarchical cache (ARMv7)
		mov	r10, #0
		beq	hierarchical
		mcr	p15, 0, r10, c7, c14, 0	@ clean+invalidate D
		b	iflush
hierarchical:
		mcr	p15, 0, r10, c7, c10, 5	@ DMB
		stmfd	sp!, {r0-r7, r9-r11}
		mrc	p15, 1, r0, c0, c0, 1	@ read clidr
		ands	r3, r0, #0x7000000	@ extract loc from clidr
		mov	r3, r3, lsr #23		@ left align loc bit field
		beq	finished		@ if loc is 0, then no need to clean
		mov	r10, #0			@ start clean at cache level 0
loop1:
		add	r2, r10, r10, lsr #1	@ work out 3x current cache level
		mov	r1, r0, lsr r2		@ extract cache type bits from clidr
		and	r1, r1, #7		@ mask off the bits for current cache only
		cmp	r1, #2			@ see what cache we have at this level
		blt	skip			@ skip if no cache, or just i-cache
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
		mcr	p15, 0, r10, c7, c5, 4	@ isb to sync the new cssr&csidr
		mrc	p15, 1, r1, c0, c0, 0	@ read the new csidr
		and	r2, r1, #7		@ extract the length of the cache lines
		add	r2, r2, #4		@ add 4 (line length offset)
		ldr	r4, =0x3ff
		ands	r4, r4, r1, lsr #3	@ find maximum way number (associativity - 1)
		clz	r5, r4			@ find bit position of way size increment
		ldr	r7, =0x7fff
		ands	r7, r7, r1, lsr #13	@ extract maximum set/index number
loop2:
		mov	r9, r4			@ create working copy of max way size
loop3:
 ARM(		orr	r11, r10, r9, lsl r5	) @ factor way and cache number into r11
 ARM(		orr	r11, r11, r7, lsl r2	) @ factor index number into r11
 THUMB(		lsl	r6, r9, r5		)
 THUMB(		orr	r11, r10, r6		) @ factor way and cache number into r11
 THUMB(		lsl	r6, r7, r2		)
 THUMB(		orr	r11, r11, r6		) @ factor index number into r11
		mcr	p15, 0, r11, c7, c14, 2	@ clean & invalidate by set/way
		subs	r9, r9, #1		@ decrement the way
		bge	loop3
		subs	r7, r7, #1		@ decrement the index
		bge	loop2
skip:
		add	r10, r10, #2		@ increment cache number
		cmp	r3, r10
		bgt	loop1
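		/*
		 * Each value written to the clean & invalidate by set/way
		 * register above packs the cache level (bits [3:1], from
		 * r10), the way number shifted up to the top bits (r9 << r5)
		 * and the set index shifted by the line-size field (r7 << r2).
		 */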
finished:
		ldmfd	sp!, {r0-r7, r9-r11}
		mov	r10, #0			@ switch back to cache level 0
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
iflush:
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 4	@ ISB
		mov	pc, lr

__armv5tej_mmu_cache_flush:
1:		mrc	p15, 0, r15, c7, c14, 3	@ test,clean,invalidate D cache
		bne	1b
		mcr	p15, 0, r0, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv4_mmu_cache_flush:
		mov	r2, #64*1024		@ default: 32K dcache size (*2)
		mov	r11, #32		@ default: 32 byte line size
		mrc	p15, 0, r3, c0, c0, 1	@ read cache type
		teq	r3, r9			@ cache ID register present?
		beq	no_cache_id
		mov	r1, r3, lsr #18
		and	r1, r1, #7
		mov	r2, #1024
		mov	r2, r2, lsl r1		@ base dcache size *2
		tst	r3, #1 << 14		@ test M bit
		addne	r2, r2, r2, lsr #1	@ +1/2 size if M == 1
		mov	r3, r3, lsr #12
		and	r3, r3, #3
		mov	r11, #8
		mov	r11, r11, lsl r3	@ cache line size in bytes
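		/*
		 * The decode above reads the ARMv4 cache type register:
		 * bits [20:18] give the dcache size (the 1024 << n value is
		 * already twice the real size, giving the read loop below a
		 * full margin), bit 14 is the M bit (size multiplier 3/2),
		 * and bits [13:12] give the line length as 8 << n bytes.
		 */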
no_cache_id:
		mov	r1, pc
		bic	r1, r1, #63		@ align to longest cache line
		add	r2, r1, r2
1:
 ARM(		ldr	r3, [r1], r11		) @ s/w flush D cache
 THUMB(		ldr	r3, [r1]		) @ s/w flush D cache
 THUMB(		add	r1, r1, r11		)
		teq	r1, r2
		bne	1b

		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c6, 0	@ flush D cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

/*
 * Various debugging routines for printing hex characters and
 * memory, which again must be relocatable.
 */
#ifdef DEBUG
		.align	2
		.type	phexbuf,#object
phexbuf:	.space	12
		.size	phexbuf, . - phexbuf

@ phex corrupts {r0, r1, r2, r3}
phex:		adr	r3, phexbuf
		mov	r2, #0
		strb	r2, [r3, r1]
1:		subs	r1, r1, #1
		movmi	r0, r3
		bmi	puts
		and	r2, r0, #15
		mov	r0, r0, lsr #4
		cmp	r2, #10
		addge	r2, r2, #7
		add	r2, r2, #'0'
		strb	r2, [r3, r1]
		b	1b
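@ The loop above emits the hex string backwards: each pass takes the low
@ nibble of r0, converts it to ASCII ('A'-'F' via the extra +7 when the
@ digit is >= 10), stores it one position down in phexbuf, and falls
@ through to puts once r1 goes negative.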

@ puts corrupts {r0, r1, r2, r3}
puts:		loadsp	r3, r1
1:		ldrb	r2, [r0], #1
		teq	r2, #0
		moveq	pc, lr
2:		writeb	r2, r3
		mov	r1, #0x00020000
3:		subs	r1, r1, #1
		bne	3b
		teq	r2, #'\n'
		moveq	r2, #'\r'
		beq	2b
		teq	r0, #0
		bne	1b
		mov	pc, lr
@ putc corrupts {r0, r1, r2, r3}
putc:
		mov	r2, r0
		mov	r0, #0
		loadsp	r3, r1
		b	2b

@ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
memdump:	mov	r12, r0
		mov	r10, lr
		mov	r11, #0
2:		mov	r0, r11, lsl #2
		add	r0, r0, r12
		mov	r1, #8
		bl	phex
		mov	r0, #':'
		bl	putc
1:		mov	r0, #' '
		bl	putc
		ldr	r0, [r12, r11, lsl #2]
		mov	r1, #8
		bl	phex
		and	r0, r11, #7
		teq	r0, #3
		moveq	r0, #' '
		bleq	putc
		and	r0, r11, #7
		add	r11, r11, #1
		teq	r0, #7
		bne	1b
		mov	r0, #'\n'
		bl	putc
		cmp	r11, #64
		blt	2b
		mov	pc, r10
#endif

		.ltorg

#ifdef CONFIG_ARM_VIRT_EXT
.align 5
__hyp_reentry_vectors:
		W(b)	.			@ reset
		W(b)	.			@ undef
		W(b)	.			@ svc
		W(b)	.			@ pabort
		W(b)	.			@ dabort
		W(b)	__enter_kernel		@ hyp
		W(b)	.			@ irq
		W(b)	.			@ fiq
#endif /* CONFIG_ARM_VIRT_EXT */

__enter_kernel:
		mov	r0, #0			@ must be 0
 ARM(		mov	pc, r4	)		@ call kernel
 THUMB(		bx	r4	)		@ entry point is always ARM

reloc_code_end:

		.align
		.section ".stack", "aw", %nobits
.L_user_stack:	.space	4096
.L_user_stack_end: