/*
 * linux/arch/arm/boot/compressed/head.S
 *
 * Copyright (C) 1996-2002 Russell King
 * Copyright (C) 2004 Hyok S. Choi (MPU support)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>

/*
 * Debugging stuff
 *
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */
#ifdef DEBUG

#if defined(CONFIG_DEBUG_ICEDCC)

#ifdef CONFIG_CPU_V6
        .macro  loadsp, rb, tmp
        .endm
        .macro  writeb, ch, rb
        mcr     p14, 0, \ch, c0, c5, 0
        .endm
#elif defined(CONFIG_CPU_V7)
        .macro  loadsp, rb, tmp
        .endm
        .macro  writeb, ch, rb
wait:   mrc     p14, 0, pc, c0, c1, 0
        bcs     wait
        mcr     p14, 0, \ch, c0, c5, 0
        .endm
#elif defined(CONFIG_CPU_XSCALE)
        .macro  loadsp, rb, tmp
        .endm
        .macro  writeb, ch, rb
        mcr     p14, 0, \ch, c8, c0, 0
        .endm
#else
        .macro  loadsp, rb, tmp
        .endm
        .macro  writeb, ch, rb
        mcr     p14, 0, \ch, c1, c0, 0
        .endm
#endif

#else

#include <mach/debug-macro.S>

        .macro  writeb, ch, rb
        senduart \ch, \rb
        .endm

#if defined(CONFIG_ARCH_SA1100)
        .macro  loadsp, rb, tmp
        mov     \rb, #0x80000000        @ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
        add     \rb, \rb, #0x00050000   @ Ser3
#else
        add     \rb, \rb, #0x00010000   @ Ser1
#endif
        .endm
#elif defined(CONFIG_ARCH_S3C2410)
        .macro  loadsp, rb, tmp
        mov     \rb, #0x50000000
        add     \rb, \rb, #0x4000 * CONFIG_S3C_LOWLEVEL_UART_PORT
        .endm
#else
        .macro  loadsp, rb, tmp
        addruart \rb, \tmp
        .endm
#endif
#endif
#endif

        .macro  kputc,val
        mov     r0, \val
        bl      putc
        .endm

        .macro  kphex,val,len
        mov     r0, \val
        mov     r1, #\len
        bl      phex
        .endm

        .macro  debug_reloc_start
#ifdef DEBUG
        kputc   #'\n'
        kphex   r6, 8           /* processor id */
        kputc   #':'
        kphex   r7, 8           /* architecture id */
#ifdef CONFIG_CPU_CP15
        kputc   #':'
        mrc     p15, 0, r0, c1, c0
        kphex   r0, 8           /* control reg */
#endif
        kputc   #'\n'
        kphex   r5, 8           /* decompressed kernel start */
        kputc   #'-'
        kphex   r9, 8           /* decompressed kernel end */
        kputc   #'>'
        kphex   r4, 8           /* kernel execution address */
        kputc   #'\n'
#endif
        .endm

        .macro  debug_reloc_end
#ifdef DEBUG
        kphex   r5, 8           /* end of kernel */
        kputc   #'\n'
        mov     r0, r4
        bl      memdump         /* dump 256 bytes at start of kernel */
#endif
        .endm

        .section ".start", #alloc, #execinstr
/*
 * sort out different calling conventions
 */
        .align
start:
        .type   start,#function
        .rept   8
        mov     r0, r0
        .endr

        b       1f
        .word   0x016f2818              @ Magic numbers to help the loader
        .word   start                   @ absolute load/run zImage address
        .word   _edata                  @ zImage end address
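/*
 * The eight nops plus the branch above place the well-known zImage
 * magic 0x016f2818 at a fixed byte offset of 0x24 from the image
 * start, with the load address at 0x28 and the end address at 0x2c,
 * so a bootloader can recognise and size the image.
 */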
1:      mov     r7, r1                  @ save architecture ID
        mov     r8, r2                  @ save atags pointer

#ifndef __ARM_ARCH_2__
        /*
         * Booting from Angel - need to enter SVC mode and disable
         * FIQs/IRQs (numeric definitions from angel arm.h source).
         * We only do this if we were in user mode on entry.
         */
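        /*
         * The CPSR mode field has its low two bits clear only in USR
         * mode (0x10); SVC (0x13) and the other privileged modes do
         * not, so "tst r2, #3" below skips the Angel SWI unless we
         * really were entered in user mode.
         */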
        mrs     r2, cpsr                @ get current mode
        tst     r2, #3                  @ not user?
        bne     not_angel
        mov     r0, #0x17               @ angel_SWIreason_EnterSVC
  ARM(  swi     0x123456        )       @ angel_SWI_ARM
  THUMB( svc    0xab            )       @ angel_SWI_THUMB
not_angel:
        mrs     r2, cpsr                @ turn off interrupts to
        orr     r2, r2, #0xc0           @ prevent angel from running
        msr     cpsr_c, r2
#else
        teqp    pc, #0x0c000003         @ turn off interrupts
#endif

        /*
         * Note that some cache flushing and other stuff may
         * be needed here - is there an Angel SWI call for this?
         */

        /*
         * some architecture specific code can be inserted
         * by the linker here, but it should preserve r7, r8, and r9.
         */

        .text
        adr     r0, LC0
  ARM(  ldmia   r0, {r1, r2, r3, r4, r5, r6, r11, ip, sp} )
  THUMB( ldmia  r0, {r1, r2, r3, r4, r5, r6, r11, ip} )
  THUMB( ldr    sp, [r0, #32] )
        subs    r0, r0, r1              @ calculate the delta offset

                                        @ if delta is zero, we are
        beq     not_relocated           @ running at the address we
                                        @ were linked at.
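        /*
         * Illustrative example (addresses made up): if LC0 was linked
         * at 0x00000140 but "adr r0, LC0" finds it at 0x10000140, the
         * delta in r0 is 0x10000000, and every address loaded from LC0
         * must be rebased by that amount below.
         */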

        /*
         * We're running at a different address.  We need to fix
         * up various pointers:
         *   r5 - zImage base address (_start)
         *   r6 - size of decompressed image
         *   r11 - GOT start
         *   ip - GOT end
         */
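        /*
         * The GOT entries are absolute addresses of C-visible objects,
         * computed for the link-time address of the image; each one is
         * therefore rebased by the delta before any C code runs.
         */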
        add     r5, r5, r0
        add     r11, r11, r0
        add     ip, ip, r0

#ifndef CONFIG_ZBOOT_ROM
        /*
         * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
         * we need to fix up pointers into the BSS region.
         *   r2 - BSS start
         *   r3 - BSS end
         *   sp - stack pointer
         */
        add     r2, r2, r0
        add     r3, r3, r0
        add     sp, sp, r0

        /*
         * Relocate all entries in the GOT table.
         */
1:      ldr     r1, [r11, #0]           @ relocate entries in the GOT
        add     r1, r1, r0              @ table.  This fixes up the
        str     r1, [r11], #4           @ C references.
        cmp     r11, ip
        blo     1b
#else

        /*
         * Relocate entries in the GOT table.  We only relocate
         * the entries that are outside the (relocated) BSS region.
         */
1:      ldr     r1, [r11, #0]           @ relocate entries in the GOT
        cmp     r1, r2                  @ entry < bss_start ||
        cmphs   r3, r1                  @ _end < entry
        addlo   r1, r1, r0              @ table.  This fixes up the
        str     r1, [r11], #4           @ C references.
        cmp     r11, ip
        blo     1b
#endif

not_relocated:  mov     r0, #0
1:      str     r0, [r2], #4            @ clear bss
        str     r0, [r2], #4
        str     r0, [r2], #4
        str     r0, [r2], #4
        cmp     r2, r3
        blo     1b

        /*
         * The C runtime environment should now be set up
         * sufficiently.  Turn the cache on, set up some
         * pointers, and start decompressing.
         */
        bl      cache_on

        mov     r1, sp                  @ malloc space above stack
        add     r2, sp, #0x10000        @ 64k max

/*
 * Check to see if we will overwrite ourselves.
 *   r4 = final kernel address
 *   r5 = start of this image
 *   r6 = size of decompressed image
 *   r2 = end of malloc space (and therefore this image)
 * We basically want:
 *   r4 >= r2 -> OK
 *   r4 + image length <= r5 -> OK
 */
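/*
 * Worked example (illustrative numbers only): with r4 = 0x00008000,
 * r5 = 0x00208000 and r6 = 0x00400000, the decompressed kernel would
 * span 0x00008000-0x00408000 and overlap this image, so neither test
 * below passes and we must decompress high and relocate afterwards.
 */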
        cmp     r4, r2
        bhs     wont_overwrite
        add     r0, r4, r6
        cmp     r0, r5
        bls     wont_overwrite

        mov     r5, r2                  @ decompress after malloc space
        mov     r0, r5
        mov     r3, r7
        bl      decompress_kernel

        add     r0, r0, #127 + 128      @ alignment + stack
        bic     r0, r0, #127            @ align the kernel length
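        /*
         * The two instructions above round the decompressed length up
         * to the next 128-byte boundary and reserve a further 128
         * bytes of headroom that the relocation code uses as a
         * temporary stack.
         */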
/*
 * r0     = decompressed kernel length
 * r1-r3  = unused
 * r4     = kernel execution address
 * r5     = decompressed kernel start
 * r7     = architecture ID
 * r8     = atags pointer
 * r9-r12,r14 = corrupted
 */
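/*
 * The loop below copies the relocation code (reloc_start..reloc_end)
 * to the first free bytes past the decompressed image, moving 40 bytes
 * (two five-register block transfers) per iteration, and then branches
 * to the copy.
 */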
        add     r1, r5, r0              @ end of decompressed kernel
        adr     r2, reloc_start
        ldr     r3, LC1
        add     r3, r2, r3
1:      ldmia   r2!, {r9 - r12, r14}    @ copy relocation code
        stmia   r1!, {r9 - r12, r14}
        ldmia   r2!, {r9 - r12, r14}
        stmia   r1!, {r9 - r12, r14}
        cmp     r2, r3
        blo     1b
        mov     sp, r1
        add     sp, sp, #128            @ relocate the stack

        bl      cache_clean_flush
  ARM(  add     pc, r5, r0      )       @ call relocation code
  THUMB( add    r12, r5, r0     )
  THUMB( mov    pc, r12         )       @ call relocation code

/*
 * We're not in danger of overwriting ourselves.  Do this the simple way.
 *
 * r4     = kernel execution address
 * r7     = architecture ID
 */
wont_overwrite: mov     r0, r4
        mov     r3, r7
        bl      decompress_kernel
        b       call_kernel

        .align  2
        .type   LC0, #object
LC0:    .word   LC0                     @ r1
        .word   __bss_start             @ r2
        .word   _end                    @ r3
        .word   zreladdr                @ r4
        .word   _start                  @ r5
        .word   _image_size             @ r6
        .word   _got_start              @ r11
        .word   _got_end                @ ip
        .word   user_stack_end          @ sp
LC1:    .word   reloc_end - reloc_start
        .size   LC0, . - LC0

#ifdef CONFIG_ARCH_RPC
        .globl  params
params: ldr     r0, =params_phys
        mov     pc, lr
        .ltorg
        .align
#endif

/*
 * Turn on the cache.  We need to set up some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * will go pop!
 *
 * On entry,
 *  r4 = kernel execution address
 *  r7 = architecture number
 *  r8 = atags pointer
 * On exit,
 *  r0, r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *  r4, r5, r6, r7, r8
 */
        .align  5
cache_on:       mov     r3, #8          @ cache_on function
        b       call_cache_fn

/*
 * Initialize the highest priority protection region, PR7,
 * to cover the whole 32-bit address space, cacheable and bufferable.
 */
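/*
 * Region size encoding used below: bit 0 of an MPU region register
 * enables the region and bits [5:1] hold a size exponent N selecting
 * 2^(N+1) bytes, so 0x3f is an enabled region of 2^32 bytes covering
 * the whole address space.
 */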
__armv4_mpu_cache_on:
        mov     r0, #0x3f               @ 4G, the whole
        mcr     p15, 0, r0, c6, c7, 0   @ PR7 Area Setting
        mcr     p15, 0, r0, c6, c7, 1

        mov     r0, #0x80               @ PR7
        mcr     p15, 0, r0, c2, c0, 0   @ D-cache on
        mcr     p15, 0, r0, c2, c0, 1   @ I-cache on
        mcr     p15, 0, r0, c3, c0, 0   @ write-buffer on

        mov     r0, #0xc000
        mcr     p15, 0, r0, c5, c0, 1   @ I-access permission
        mcr     p15, 0, r0, c5, c0, 0   @ D-access permission

        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4  @ drain write buffer
        mcr     p15, 0, r0, c7, c5, 0   @ flush(inval) I-Cache
        mcr     p15, 0, r0, c7, c6, 0   @ flush(inval) D-Cache
        mrc     p15, 0, r0, c1, c0, 0   @ read control reg
                                        @ ...I .... ..D. WC.M
        orr     r0, r0, #0x002d         @ .... .... ..1. 11.1
        orr     r0, r0, #0x1000         @ ...1 .... .... ....

        mcr     p15, 0, r0, c1, c0, 0   @ write control reg

        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 0   @ flush(inval) I-Cache
        mcr     p15, 0, r0, c7, c6, 0   @ flush(inval) D-Cache
        mov     pc, lr

__armv3_mpu_cache_on:
        mov     r0, #0x3f               @ 4G, the whole
        mcr     p15, 0, r0, c6, c7, 0   @ PR7 Area Setting

        mov     r0, #0x80               @ PR7
        mcr     p15, 0, r0, c2, c0, 0   @ cache on
        mcr     p15, 0, r0, c3, c0, 0   @ write-buffer on

        mov     r0, #0xc000
        mcr     p15, 0, r0, c5, c0, 0   @ access permission

        mov     r0, #0
        mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
        /*
         * ?? ARMv3 MMU does not allow reading the control register,
         * does this really work on ARMv3 MPU?
         */
        mrc     p15, 0, r0, c1, c0, 0   @ read control reg
                                        @ .... .... .... WC.M
        orr     r0, r0, #0x000d         @ .... .... .... 11.1
        /* ?? this overwrites the value constructed above? */
        mov     r0, #0
        mcr     p15, 0, r0, c1, c0, 0   @ write control reg

        /* ?? invalidate for the second time? */
        mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
        mov     pc, lr

__setup_mmu:    sub     r3, r4, #16384  @ Page directory size
        bic     r3, r3, #0xff           @ Align the pointer
        bic     r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
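/*
 * Section descriptor layout used below (ARMv4 short descriptors):
 * bits [1:0] = 10 mark a 1MB section, bit 4 is the legacy
 * should-be-one bit, AP bits [11:10] = 11 grant full access, and
 * bits [3:2] are the C (cacheable) and B (bufferable) attributes
 * toggled via 0x0c.  "0x12 | 3 << 10" therefore builds a full-access,
 * initially uncached section entry.
 */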
        mov     r0, r3
        mov     r9, r0, lsr #18
        mov     r9, r9, lsl #18         @ start of RAM
        add     r10, r9, #0x10000000    @ a reasonable RAM size
        mov     r1, #0x12
        orr     r1, r1, #3 << 10
        add     r2, r3, #16384
1:      cmp     r1, r9                  @ if virt > start of RAM
        orrhs   r1, r1, #0x0c           @ set cacheable, bufferable
        cmp     r1, r10                 @ if virt > end of RAM
        bichs   r1, r1, #0x0c           @ clear cacheable, bufferable
        str     r1, [r0], #4            @ 1:1 mapping
        add     r1, r1, #1048576
        teq     r0, r2
        bne     1b
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
        mov     r1, #0x1e
        orr     r1, r1, #3 << 10
        mov     r2, pc, lsr #20
        orr     r1, r1, r2, lsl #20
        add     r0, r3, r2, lsl #2
        str     r1, [r0], #4
        add     r1, r1, #1048576
        str     r1, [r0]
        mov     pc, lr
ENDPROC(__setup_mmu)

__armv4_mmu_cache_on:
        mov     r12, lr
#ifdef CONFIG_MMU
        bl      __setup_mmu
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4  @ drain write buffer
        mcr     p15, 0, r0, c8, c7, 0   @ flush I,D TLBs
        mrc     p15, 0, r0, c1, c0, 0   @ read control reg
        orr     r0, r0, #0x5000         @ I-cache enable, RR cache replacement
        orr     r0, r0, #0x0030
#ifdef CONFIG_CPU_ENDIAN_BE8
        orr     r0, r0, #1 << 25        @ big-endian page tables
#endif
        bl      __common_mmu_cache_on
        mov     r0, #0
        mcr     p15, 0, r0, c8, c7, 0   @ flush I,D TLBs
#endif
        mov     pc, r12

__armv7_mmu_cache_on:
        mov     r12, lr
#ifdef CONFIG_MMU
        mrc     p15, 0, r11, c0, c1, 4  @ read ID_MMFR0
        tst     r11, #0xf               @ VMSA
        blne    __setup_mmu
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4  @ drain write buffer
        tst     r11, #0xf               @ VMSA
        mcrne   p15, 0, r0, c8, c7, 0   @ flush I,D TLBs
#endif
        mrc     p15, 0, r0, c1, c0, 0   @ read control reg
        orr     r0, r0, #0x5000         @ I-cache enable, RR cache replacement
        orr     r0, r0, #0x003c         @ write buffer
#ifdef CONFIG_MMU
#ifdef CONFIG_CPU_ENDIAN_BE8
        orr     r0, r0, #1 << 25        @ big-endian page tables
#endif
        orrne   r0, r0, #1              @ MMU enabled
        movne   r1, #-1
        mcrne   p15, 0, r3, c2, c0, 0   @ load page table pointer
        mcrne   p15, 0, r1, c3, c0, 0   @ load domain access control
#endif
        mcr     p15, 0, r0, c1, c0, 0   @ load control register
        mrc     p15, 0, r0, c1, c0, 0   @ and read it back
        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 4   @ ISB
        mov     pc, r12

__fa526_cache_on:
        mov     r12, lr
        bl      __setup_mmu
        mov     r0, #0
        mcr     p15, 0, r0, c7, c7, 0   @ Invalidate whole cache
        mcr     p15, 0, r0, c7, c10, 4  @ drain write buffer
        mcr     p15, 0, r0, c8, c7, 0   @ flush UTLB
        mrc     p15, 0, r0, c1, c0, 0   @ read control reg
        orr     r0, r0, #0x1000         @ I-cache enable
        bl      __common_mmu_cache_on
        mov     r0, #0
        mcr     p15, 0, r0, c8, c7, 0   @ flush UTLB
        mov     pc, r12

__arm6_mmu_cache_on:
        mov     r12, lr
        bl      __setup_mmu
        mov     r0, #0
        mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
        mcr     p15, 0, r0, c5, c0, 0   @ invalidate whole TLB v3
        mov     r0, #0x30
        bl      __common_mmu_cache_on
        mov     r0, #0
        mcr     p15, 0, r0, c5, c0, 0   @ invalidate whole TLB v3
        mov     pc, r12

__common_mmu_cache_on:
#ifndef CONFIG_THUMB2_KERNEL
#ifndef DEBUG
        orr     r0, r0, #0x000d         @ Write buffer, mmu
#endif
        mov     r1, #-1
        mcr     p15, 0, r3, c2, c0, 0   @ load page table pointer
        mcr     p15, 0, r1, c3, c0, 0   @ load domain access control
        b       1f
        .align  5                       @ cache line aligned
1:      mcr     p15, 0, r0, c1, c0, 0   @ load control register
        mrc     p15, 0, r0, c1, c0, 0   @ and read it back to
        sub     pc, lr, r0, lsr #32     @ properly flush pipeline
#endif

/*
 * All code following this line is relocatable.  It is relocated by
 * the above code to the end of the decompressed kernel image and
 * executed there.  During this time, we have no stacks.
 *
 * r0     = decompressed kernel length
 * r1-r3  = unused
 * r4     = kernel execution address
 * r5     = decompressed kernel start
 * r7     = architecture ID
 * r8     = atags pointer
 * r9-r12,r14 = corrupted
 */
        .align  5
reloc_start:    add     r9, r5, r0
        sub     r9, r9, #128            @ do not copy the stack
        debug_reloc_start
        mov     r1, r4
1:
        .rept   4
        ldmia   r5!, {r0, r2, r3, r10 - r12, r14}       @ relocate kernel
        stmia   r1!, {r0, r2, r3, r10 - r12, r14}
        .endr

        cmp     r5, r9
        blo     1b
        mov     sp, r1
        add     sp, sp, #128            @ relocate the stack
        debug_reloc_end

call_kernel:    bl      cache_clean_flush
        bl      cache_off
        mov     r0, #0                  @ must be zero
        mov     r1, r7                  @ restore architecture number
        mov     r2, r8                  @ restore atags pointer
        mov     pc, r4                  @ call kernel

/*
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block.  Please note this is all position
 * independent code.
 *
 * r1  = corrupted
 * r2  = corrupted
 * r3  = block offset
 * r9  = corrupted
 * r12 = corrupted
 */

call_cache_fn:  adr     r12, proc_types
#ifdef CONFIG_CPU_CP15
        mrc     p15, 0, r9, c0, c0      @ get processor ID
#else
        ldr     r9, =CONFIG_PROCESSOR_ID
#endif
1:      ldr     r1, [r12, #0]           @ get value
        ldr     r2, [r12, #4]           @ get mask
        eor     r1, r1, r9              @ (real ^ match)
        tst     r1, r2                  @ & mask
  ARM(  addeq   pc, r12, r3     )       @ call cache function
  THUMB( addeq  r12, r3         )
  THUMB( moveq  pc, r12         )       @ call cache function
        add     r12, r12, #4*5
        b       1b

/*
 * Table for cache operations.  This is basically:
 *  - CPU ID match
 *  - CPU ID mask
 *  - 'cache on' method instruction
 *  - 'cache off' method instruction
 *  - 'cache flush' method instruction
 *
 * We match an entry using: ((real_id ^ match) & mask) == 0
 *
 * Writethrough caches generally only need 'on' and 'off'
 * methods.  Writeback caches _must_ have the flush method
 * defined.
 */
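/*
 * Example: a Cortex-A8 reports a main ID of 0x410fc080.  None of the
 * specific entries below match it, but for the generic ARMv7 entry
 * (0x410fc080 ^ 0x000f0000) & 0x000f0000 == 0, so its
 * __armv7_mmu_cache_* methods are selected.
 */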
        .align  2
        .type   proc_types,#object
proc_types:
        .word   0x41560600              @ ARM6/610
        .word   0xffffffe0
        W(b)    __arm6_mmu_cache_off    @ works, but slow
        W(b)    __arm6_mmu_cache_off
        mov     pc, lr
 THUMB( nop                             )
@       b       __arm6_mmu_cache_on             @ untested
@       b       __arm6_mmu_cache_off
@       b       __armv3_mmu_cache_flush

        .word   0x00000000              @ old ARM ID
        .word   0x0000f000
        mov     pc, lr
 THUMB( nop                             )
        mov     pc, lr
 THUMB( nop                             )
        mov     pc, lr
 THUMB( nop                             )

        .word   0x41007000              @ ARM7/710
        .word   0xfff8fe00
        W(b)    __arm7_mmu_cache_off
        W(b)    __arm7_mmu_cache_off
        mov     pc, lr
 THUMB( nop                             )

        .word   0x41807200              @ ARM720T (writethrough)
        .word   0xffffff00
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        mov     pc, lr
 THUMB( nop                             )

        .word   0x41007400              @ ARM74x
        .word   0xff00ff00
        W(b)    __armv3_mpu_cache_on
        W(b)    __armv3_mpu_cache_off
        W(b)    __armv3_mpu_cache_flush

        .word   0x41009400              @ ARM94x
        .word   0xff00ff00
        W(b)    __armv4_mpu_cache_on
        W(b)    __armv4_mpu_cache_off
        W(b)    __armv4_mpu_cache_flush

        .word   0x00007000              @ ARM7 IDs
        .word   0x0000f000
        mov     pc, lr
 THUMB( nop                             )
        mov     pc, lr
 THUMB( nop                             )
        mov     pc, lr
 THUMB( nop                             )

        @ Everything from here on will be the new ID system.

        .word   0x4401a100              @ sa110 / sa1100
        .word   0xffffffe0
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv4_mmu_cache_flush

        .word   0x6901b110              @ sa1110
        .word   0xfffffff0
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv4_mmu_cache_flush

        .word   0x56056900
        .word   0xffffff00              @ PXA9xx
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv4_mmu_cache_flush

        .word   0x56158000              @ PXA168
        .word   0xfffff000
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv5tej_mmu_cache_flush

        .word   0x56050000              @ Feroceon
        .word   0xff0f0000
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv5tej_mmu_cache_flush

#ifdef CONFIG_CPU_FEROCEON_OLD_ID
        /* this conflicts with the standard ARMv5TE entry */
        .long   0x41009260              @ Old Feroceon
        .long   0xff00fff0
        b       __armv4_mmu_cache_on
        b       __armv4_mmu_cache_off
        b       __armv5tej_mmu_cache_flush
#endif

        .word   0x66015261              @ FA526
        .word   0xff01fff1
        W(b)    __fa526_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __fa526_cache_flush

        @ These match on the architecture ID

        .word   0x00020000              @ ARMv4T
        .word   0x000f0000
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv4_mmu_cache_flush

        .word   0x00050000              @ ARMv5TE
        .word   0x000f0000
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv4_mmu_cache_flush

        .word   0x00060000              @ ARMv5TEJ
        .word   0x000f0000
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv5tej_mmu_cache_flush

        .word   0x0007b000              @ ARMv6
        .word   0x000ff000
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv6_mmu_cache_flush

        .word   0x560f5810              @ Marvell PJ4 ARMv6
        .word   0xff0ffff0
        W(b)    __armv4_mmu_cache_on
        W(b)    __armv4_mmu_cache_off
        W(b)    __armv6_mmu_cache_flush

        .word   0x000f0000              @ new CPU Id
        .word   0x000f0000
        W(b)    __armv7_mmu_cache_on
        W(b)    __armv7_mmu_cache_off
        W(b)    __armv7_mmu_cache_flush

        .word   0                       @ unrecognised type
        .word   0
        mov     pc, lr
 THUMB( nop                             )
        mov     pc, lr
 THUMB( nop                             )
        mov     pc, lr
 THUMB( nop                             )

        .size   proc_types, . - proc_types

/*
 * Turn off the Cache and MMU.  ARMv3 does not support
 * reading the control register, but ARMv4 does.
 *
 * On exit,
 *  r0, r1, r2, r3, r9, r12 corrupted
 * This routine must preserve:
 *  r4, r6, r7
 */
        .align  5
cache_off:      mov     r3, #12         @ cache_off function
        b       call_cache_fn

__armv4_mpu_cache_off:
        mrc     p15, 0, r0, c1, c0
        bic     r0, r0, #0x000d
        mcr     p15, 0, r0, c1, c0      @ turn MPU and cache off
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4  @ drain write buffer
        mcr     p15, 0, r0, c7, c6, 0   @ flush D-Cache
        mcr     p15, 0, r0, c7, c5, 0   @ flush I-Cache
        mov     pc, lr

__armv3_mpu_cache_off:
        mrc     p15, 0, r0, c1, c0
        bic     r0, r0, #0x000d
        mcr     p15, 0, r0, c1, c0, 0   @ turn MPU and cache off
        mov     r0, #0
        mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
        mov     pc, lr

__armv4_mmu_cache_off:
#ifdef CONFIG_MMU
        mrc     p15, 0, r0, c1, c0
        bic     r0, r0, #0x000d
        mcr     p15, 0, r0, c1, c0      @ turn MMU and cache off
        mov     r0, #0
        mcr     p15, 0, r0, c7, c7      @ invalidate whole cache v4
        mcr     p15, 0, r0, c8, c7      @ invalidate whole TLB v4
#endif
        mov     pc, lr

__armv7_mmu_cache_off:
        mrc     p15, 0, r0, c1, c0
#ifdef CONFIG_MMU
        bic     r0, r0, #0x000d
#else
        bic     r0, r0, #0x000c
#endif
        mcr     p15, 0, r0, c1, c0      @ turn MMU and cache off
        mov     r12, lr
        bl      __armv7_mmu_cache_flush
        mov     r0, #0
#ifdef CONFIG_MMU
        mcr     p15, 0, r0, c8, c7, 0   @ invalidate whole TLB
#endif
        mcr     p15, 0, r0, c7, c5, 6   @ invalidate BTC
        mcr     p15, 0, r0, c7, c10, 4  @ DSB
        mcr     p15, 0, r0, c7, c5, 4   @ ISB
        mov     pc, r12

__arm6_mmu_cache_off:
        mov     r0, #0x00000030         @ ARM6 control reg.
        b       __armv3_mmu_cache_off

__arm7_mmu_cache_off:
        mov     r0, #0x00000070         @ ARM7 control reg.
        b       __armv3_mmu_cache_off

__armv3_mmu_cache_off:
        mcr     p15, 0, r0, c1, c0, 0   @ turn MMU and cache off
        mov     r0, #0
        mcr     p15, 0, r0, c7, c0, 0   @ invalidate whole cache v3
        mcr     p15, 0, r0, c5, c0, 0   @ invalidate whole TLB v3
        mov     pc, lr

/*
 * Clean and flush the cache to maintain consistency.
 *
 * On exit,
 *  r1, r2, r3, r9, r10, r11, r12 corrupted
 * This routine must preserve:
 *  r0, r4, r5, r6, r7
 */
        .align  5
cache_clean_flush:
        mov     r3, #16
        b       call_cache_fn

__armv4_mpu_cache_flush:
        mov     r2, #1
        mov     r3, #0
        mcr     p15, 0, ip, c7, c6, 0   @ invalidate D cache
        mov     r1, #7 << 5             @ 8 segments
1:      orr     r3, r1, #63 << 26       @ 64 entries
2:      mcr     p15, 0, r3, c7, c14, 2  @ clean & invalidate D index
        subs    r3, r3, #1 << 26
        bcs     2b                      @ entries 63 to 0
        subs    r1, r1, #1 << 5
        bcs     1b                      @ segments 7 to 0

        teq     r2, #0
        mcrne   p15, 0, ip, c7, c5, 0   @ invalidate I cache
        mcr     p15, 0, ip, c7, c10, 4  @ drain WB
        mov     pc, lr

__fa526_cache_flush:
        mov     r1, #0
        mcr     p15, 0, r1, c7, c14, 0  @ clean and invalidate D cache
        mcr     p15, 0, r1, c7, c5, 0   @ flush I cache
        mcr     p15, 0, r1, c7, c10, 4  @ drain WB
        mov     pc, lr

__armv6_mmu_cache_flush:
        mov     r1, #0
        mcr     p15, 0, r1, c7, c14, 0  @ clean+invalidate D
        mcr     p15, 0, r1, c7, c5, 0   @ invalidate I+BTB
        mcr     p15, 0, r1, c7, c15, 0  @ clean+invalidate unified
        mcr     p15, 0, r1, c7, c10, 4  @ drain WB
        mov     pc, lr

__armv7_mmu_cache_flush:
        mrc     p15, 0, r10, c0, c1, 5  @ read ID_MMFR1
        tst     r10, #0xf << 16         @ hierarchical cache (ARMv7)
        mov     r10, #0
        beq     hierarchical
        mcr     p15, 0, r10, c7, c14, 0 @ clean+invalidate D
        b       iflush
hierarchical:
        mcr     p15, 0, r10, c7, c10, 5 @ DMB
        stmfd   sp!, {r0-r7, r9-r11}
        mrc     p15, 1, r0, c0, c0, 1   @ read clidr
        ands    r3, r0, #0x7000000      @ extract loc from clidr
        mov     r3, r3, lsr #23         @ left align loc bit field
        beq     finished                @ if loc is 0, then no need to clean
        mov     r10, #0                 @ start clean at cache level 0
loop1:
        add     r2, r10, r10, lsr #1    @ work out 3x current cache level
        mov     r1, r0, lsr r2          @ extract cache type bits from clidr
        and     r1, r1, #7              @ mask of the bits for current cache only
        cmp     r1, #2                  @ see what cache we have at this level
        blt     skip                    @ skip if no cache, or just i-cache
        mcr     p15, 2, r10, c0, c0, 0  @ select current cache level in cssr
        mcr     p15, 0, r10, c7, c5, 4  @ isb to sync the new cssr & csidr
        mrc     p15, 1, r1, c0, c0, 0   @ read the new csidr
        and     r2, r1, #7              @ extract the length of the cache lines
        add     r2, r2, #4              @ add 4 (line length offset)
        ldr     r4, =0x3ff
        ands    r4, r4, r1, lsr #3      @ find maximum number of the way size
        clz     r5, r4                  @ find bit position of way size increment
        ldr     r7, =0x7fff
        ands    r7, r7, r1, lsr #13     @ extract max number of the index size
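        /*
         * The set/way operand built below follows the standard DCCISW
         * layout: the way index goes in the top bits (shifted left by
         * 32 - log2(ways), i.e. the clz result in r5), the set index
         * is shifted by the line-length field in r2, and bits [3:1]
         * carry the cache level already held in r10.
         */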
loop2:
        mov     r9, r4                  @ create working copy of max way size
loop3:
  ARM(  orr     r11, r10, r9, lsl r5 )  @ factor way and cache number into r11
  ARM(  orr     r11, r11, r7, lsl r2 )  @ factor index number into r11
  THUMB( lsl    r6, r9, r5           )
  THUMB( orr    r11, r10, r6         )  @ factor way and cache number into r11
  THUMB( lsl    r6, r7, r2           )
  THUMB( orr    r11, r11, r6         )  @ factor index number into r11
        mcr     p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
        subs    r9, r9, #1              @ decrement the way
        bge     loop3
        subs    r7, r7, #1              @ decrement the index
        bge     loop2
skip:
        add     r10, r10, #2            @ increment cache number
        cmp     r3, r10
        bgt     loop1
finished:
        ldmfd   sp!, {r0-r7, r9-r11}
        mov     r10, #0                 @ switch back to cache level 0
        mcr     p15, 2, r10, c0, c0, 0  @ select current cache level in cssr
iflush:
        mcr     p15, 0, r10, c7, c10, 4 @ DSB
        mcr     p15, 0, r10, c7, c5, 0  @ invalidate I+BTB
        mcr     p15, 0, r10, c7, c10, 4 @ DSB
        mcr     p15, 0, r10, c7, c5, 4  @ ISB
        mov     pc, lr

__armv5tej_mmu_cache_flush:
1:      mrc     p15, 0, r15, c7, c14, 3 @ test,clean,invalidate D cache
        bne     1b
        mcr     p15, 0, r0, c7, c5, 0   @ flush I cache
        mcr     p15, 0, r0, c7, c10, 4  @ drain WB
        mov     pc, lr

__armv4_mmu_cache_flush:
        mov     r2, #64*1024            @ default: 32K dcache size (*2)
        mov     r11, #32                @ default: 32 byte line size
        mrc     p15, 0, r3, c0, c0, 1   @ read cache type
        teq     r3, r9                  @ cache ID register present?
        beq     no_cache_id
        mov     r1, r3, lsr #18
        and     r1, r1, #7
        mov     r2, #1024
        mov     r2, r2, lsl r1          @ base dcache size *2
        tst     r3, #1 << 14            @ test M bit
        addne   r2, r2, r2, lsr #1      @ +1/2 size if M == 1
        mov     r3, r3, lsr #12
        and     r3, r3, #3
        mov     r11, #8
        mov     r11, r11, lsl r3        @ cache line size in bytes
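        /*
         * The decode above follows the pre-ARMv7 cache type register
         * layout as read here: the D-cache size exponent in bits
         * [20:18] (512 << size bytes, kept doubled in r2), the M bit
         * (bit 14) adding half the size again, and bits [13:12]
         * giving the line length as 8 << len bytes.
         */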
no_cache_id:
        mov     r1, pc
        bic     r1, r1, #63             @ align to longest cache line
        add     r2, r1, r2
1:
  ARM(  ldr     r3, [r1], r11   )       @ s/w flush D cache
  THUMB( ldr    r3, [r1]        )       @ s/w flush D cache
  THUMB( add    r1, r1, r11     )
        teq     r1, r2
        bne     1b

        mcr     p15, 0, r1, c7, c5, 0   @ flush I cache
        mcr     p15, 0, r1, c7, c6, 0   @ flush D cache
        mcr     p15, 0, r1, c7, c10, 4  @ drain WB
        mov     pc, lr

__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
        mov     r1, #0
        mcr     p15, 0, r1, c7, c0, 0   @ invalidate whole cache v3
        mov     pc, lr

/*
 * Various debugging routines for printing hex characters and
 * memory, which again must be relocatable.
 */
#ifdef DEBUG
        .align  2
        .type   phexbuf,#object
phexbuf:        .space  12
        .size   phexbuf, . - phexbuf

@ phex corrupts {r0, r1, r2, r3}
phex:   adr     r3, phexbuf
        mov     r2, #0
        strb    r2, [r3, r1]
1:      subs    r1, r1, #1
        movmi   r0, r3
        bmi     puts
        and     r2, r0, #15
        mov     r0, r0, lsr #4
        cmp     r2, #10
        addge   r2, r2, #7
        add     r2, r2, #'0'
        strb    r2, [r3, r1]
        b       1b

@ puts corrupts {r0, r1, r2, r3}
puts:   loadsp  r3, r1
1:      ldrb    r2, [r0], #1
        teq     r2, #0
        moveq   pc, lr
2:      writeb  r2, r3
        mov     r1, #0x00020000
3:      subs    r1, r1, #1
        bne     3b
        teq     r2, #'\n'
        moveq   r2, #'\r'
        beq     2b
        teq     r0, #0
        bne     1b
        mov     pc, lr
@ putc corrupts {r0, r1, r2, r3}
putc:
        mov     r2, r0
        mov     r0, #0
        loadsp  r3, r1
        b       2b

@ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
memdump:        mov     r12, r0
        mov     r10, lr
        mov     r11, #0
2:      mov     r0, r11, lsl #2
        add     r0, r0, r12
        mov     r1, #8
        bl      phex
        mov     r0, #':'
        bl      putc
1:      mov     r0, #' '
        bl      putc
        ldr     r0, [r12, r11, lsl #2]
        mov     r1, #8
        bl      phex
        and     r0, r11, #7
        teq     r0, #3
        moveq   r0, #' '
        bleq    putc
        and     r0, r11, #7
        add     r11, r11, #1
        teq     r0, #7
        bne     1b
        mov     r0, #'\n'
        bl      putc
        cmp     r11, #64
        blt     2b
        mov     pc, r10
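        /*
         * memdump output (illustrative): the 64 words starting at r0
         * are printed as eight rows of "address: w0 w1 w2 w3  w4 w5
         * w6 w7", with an extra space after every fourth word.
         */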
#endif

        .ltorg
reloc_end:

        .align
        .section ".stack", "w"
user_stack:     .space  4096
user_stack_end: