Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | |
3 | * License. See the file "COPYING" in the main directory of this archive | |
4 | * for more details. | |
5 | * | |
6 | * arch/sh64/kernel/head.S | |
7 | * | |
8 | * Copyright (C) 2000, 2001 Paolo Alberelli | |
9 | * Copyright (C) 2003, 2004 Paul Mundt | |
10 | * | |
11 | * | |
12 | * benedict.gaster@superh.com: 2nd May 2002 | |
13 | * Moved definition of empty_zero_page to its own section allowing | |
14 | * it to be placed at an absolute address known at load time. | |
15 | * | |
16 | * lethal@linux-sh.org: 9th May 2003 | |
17 | * Kill off GLOBAL_NAME() usage. | |
18 | * | |
19 | * lethal@linux-sh.org: 8th May 2004 | |
20 | * Add early SCIF console DTLB mapping. | |
21 | */ | |
1da177e4 | 22 | #include <asm/page.h> |
1da177e4 LT |
23 | #include <asm/cache.h> |
24 | #include <asm/tlb.h> | |
959f7d58 PM |
25 | #include <asm/cpu/registers.h> |
26 | #include <asm/cpu/mmu_context.h> | |
1da177e4 LT |
27 | #include <asm/thread_info.h> |
28 | ||
29 | /* | |
30 | * MMU defines: TLB boundaries. | |
31 | */ | |
32 | ||
33 | #define MMUIR_FIRST ITLB_FIXED | |
34 | #define MMUIR_END ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP /* exclusive bound: one TLB_STEP past the last usable ITLB slot (clear loop stops at equality) */ | |
35 | #define MMUIR_STEP TLB_STEP | |
36 | ||
37 | #define MMUDR_FIRST DTLB_FIXED | |
38 | #define MMUDR_END DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP /* exclusive bound, as for MMUIR_END */ | |
39 | #define MMUDR_STEP TLB_STEP | |
40 | ||
36763b22 PM |
41 | /* Safety check : CONFIG_PAGE_OFFSET has to be a multiple of 512Mb */ |
42 | #if (CONFIG_PAGE_OFFSET & ((1UL<<29)-1)) | |
43 | #error "CONFIG_PAGE_OFFSET must be a multiple of 512Mb" | |
1da177e4 LT |
44 | #endif |
45 | ||
46 | /* | |
47 | * MMU defines: Fixed TLBs. | |
48 | */ | |
49 | /* Deal safely with the case where the base of RAM is not 512Mb aligned */ | |
50 | ||
51 | #define ALIGN_512M_MASK (0xffffffffe0000000) /* rounds an address down to a 512Mb (1<<29) boundary */ | |
36763b22 | 52 | #define ALIGNED_EFFECTIVE ((CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START) & ALIGN_512M_MASK) |
1da177e4 LT |
53 | #define ALIGNED_PHYSICAL (CONFIG_MEMORY_START & ALIGN_512M_MASK) |
54 | ||
55 | #define MMUIR_TEXT_H (0x0000000000000003 | ALIGNED_EFFECTIVE) | |
56 | /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */ | |
57 | ||
58 | #define MMUIR_TEXT_L (0x000000000000009a | ALIGNED_PHYSICAL) | |
59 | /* 512 Mb, Cacheable, Write-back, execute, Not User, Ph. Add. */ | |
60 | ||
61 | #define MMUDR_CACHED_H 0x0000000000000003 | ALIGNED_EFFECTIVE /* NOTE(review): unlike MMUIR_TEXT_H above, not parenthesized — fine for current users, but fragile */ | |
62 | /* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */ | |
63 | #define MMUDR_CACHED_L 0x000000000000015a | ALIGNED_PHYSICAL /* NOTE(review): also unparenthesized */ | |
64 | /* 512 Mb, Cacheable, Write-back, read/write, Not User, Ph. Add. */ | |
65 | ||
66 | #ifdef CONFIG_ICACHE_DISABLED | |
67 | #define ICCR0_INIT_VAL ICCR0_OFF /* ICACHE off */ | |
68 | #else | |
69 | #define ICCR0_INIT_VAL ICCR0_ON | ICCR0_ICI /* ICE + ICI */ | |
70 | #endif | |
71 | #define ICCR1_INIT_VAL ICCR1_NOLOCK /* No locking */ | |
72 | ||
73 | #if defined (CONFIG_DCACHE_DISABLED) | |
74 | #define OCCR0_INIT_VAL OCCR0_OFF /* D-cache: off */ | |
75 | #elif defined (CONFIG_DCACHE_WRITE_THROUGH) | |
76 | #define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WT /* D-cache: on, */ | |
77 | /* WT, invalidate */ | |
78 | #elif defined (CONFIG_DCACHE_WRITE_BACK) | |
79 | #define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WB /* D-cache: on, */ | |
80 | /* WB, invalidate */ | |
81 | #else | |
82 | #error preprocessor flag CONFIG_DCACHE_... not recognized! | |
83 | #endif | |
84 | ||
85 | #define OCCR1_INIT_VAL OCCR1_NOLOCK /* No locking */ | |
87 | .section .empty_zero_page, "aw" /* own section so it can be placed at an absolute address known at load time (see file header) */ | |
88 | .global empty_zero_page | |
89 | ||
90 | empty_zero_page: | |
91 | .long 1 /* MOUNT_ROOT_RDONLY */ | |
92 | .long 0 /* RAMDISK_FLAGS */ | |
93 | .long 0x0200 /* ORIG_ROOT_DEV */ | |
94 | .long 1 /* LOADER_TYPE */ | |
95 | .long 0x00800000 /* INITRD_START */ | |
96 | .long 0x00800000 /* INITRD_SIZE */ | |
97 | .long 0 /* NOTE(review): trailing zero — presumably a terminator/padding for the parameter list; confirm against the setup code that reads this page */ | |
98 | ||
99 | .text | |
100 | .balign 4096,0,4096 /* pad .text out to a 4K boundary (fill 0, max padding 4096) */ | |
101 | ||
102 | .section .data, "aw" | |
103 | .balign PAGE_SIZE | |
104 | ||
105 | .section .data, "aw" /* NOTE(review): duplicate of the .section/.balign pair above — looks redundant but is harmless; verify against vmlinux.lds.S before removing */ | |
106 | .balign PAGE_SIZE | |
107 | ||
061854fd PM |
108 | .global mmu_pdtp_cache |
109 | mmu_pdtp_cache: /* page-sized, zero-initialized storage — presumably a page-directory pointer cache for the MMU; TODO confirm against its users */ | |
1da177e4 LT |
110 | .space PAGE_SIZE, 0 |
111 | ||
112 | .global empty_bad_page | |
113 | empty_bad_page: /* one zeroed page */ | |
114 | .space PAGE_SIZE, 0 | |
115 | ||
116 | .global empty_bad_pte_table | |
117 | empty_bad_pte_table: /* one zeroed page */ | |
118 | .space PAGE_SIZE, 0 | |
119 | ||
120 | .global fpu_in_use | |
121 | fpu_in_use: .quad 0 /* written by the FPU-probe block in _stext below: nonzero iff the FPU enable took effect */ | |
122 | ||
123 | ||
56982002 | 124 | .section .text.head, "ax" |
1da177e4 LT |
125 | .balign L1_CACHE_BYTES |
126 | /* | |
127 | * Condition at the entry of __stext: | |
128 | * . Reset state: | |
129 | * . SR.FD = 1 (FPU disabled) | |
130 | * . SR.BL = 1 (Exceptions disabled) | |
131 | * . SR.MD = 1 (Privileged Mode) | |
132 | * . SR.MMU = 0 (MMU Disabled) | |
133 | * . SR.CD = 0 (CTC User Visible) | |
134 | * . SR.IMASK = Undefined (Interrupt Mask) | |
135 | * | |
136 | * Operations supposed to be performed by __stext: | |
137 | * . prevent speculative fetch onto device memory while MMU is off | |
138 | * . reflect as much as possible SH5 ABI (r15, r26, r27, r18) | |
139 | * . first, save CPU state and set it to something harmless | |
140 | * . any CPU detection and/or endianness settings (?) | |
141 | * . initialize EMI/LMI (but not TMU/RTC/INTC/SCIF): TBD | |
142 | * . set initial TLB entries for cached and uncached regions | |
143 | * (no fine granularity paging) | |
144 | * . set initial cache state | |
145 | * . enable MMU and caches | |
146 | * . set CPU to a consistent state | |
147 | * . registers (including stack pointer and current/KCR0) | |
148 | * . NOT expecting to set Exception handling nor VBR/RESVEC/DCR | |
149 | * at this stage. This is all to later Linux initialization steps. | |
150 | * . initialize FPU | |
151 | * . clear BSS | |
152 | * . jump into start_kernel() | |
153 | * . be prepared to hopeless start_kernel() returns. | |
154 | * | |
155 | */ | |
156 | .global _stext | |
157 | _stext: | |
158 | /* | |
159 | * Prevent speculative fetch on device memory due to | |
160 | * uninitialized target registers. | |
161 | */ | |
162 | ptabs/u ZERO, tr0 /* point all eight target registers at a known-safe value */ | |
163 | ptabs/u ZERO, tr1 | |
164 | ptabs/u ZERO, tr2 | |
165 | ptabs/u ZERO, tr3 | |
166 | ptabs/u ZERO, tr4 | |
167 | ptabs/u ZERO, tr5 | |
168 | ptabs/u ZERO, tr6 | |
169 | ptabs/u ZERO, tr7 | |
170 | synci | |
171 | ||
172 | /* | |
173 | * Read/Set CPU state. After this block: | |
174 | * r29 = Initial SR | |
175 | */ | |
176 | getcon SR, r29 /* preserve the reset-time SR for post-mortem (see hopeless) */ | |
177 | movi SR_HARMLESS, r20 | |
178 | putcon r20, SR | |
179 | ||
180 | /* | |
181 | * Initialize EMI/LMI. To Be Done. | |
182 | */ | |
183 | ||
184 | /* | |
185 | * CPU detection and/or endianness settings (?). To Be Done. | |
186 | * Pure PIC code here, please ! Just save state into r30. | |
187 | * After this block: | |
188 | * r30 = CPU type/Platform Endianness | |
189 | */ | |
190 | ||
191 | /* | |
192 | * Set initial TLB entries for cached and uncached regions. | |
193 | * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't ! | |
194 | */ | |
195 | /* Clear ITLBs */ | |
196 | pta clear_ITLB, tr1 | |
197 | movi MMUIR_FIRST, r21 | |
198 | movi MMUIR_END, r22 /* exclusive end: loop below stops when r21 == r22 */ | |
199 | clear_ITLB: | |
200 | putcfg r21, 0, ZERO /* Clear MMUIR[n].PTEH.V */ | |
201 | addi r21, MMUIR_STEP, r21 | |
202 | bne r21, r22, tr1 | |
203 | ||
204 | /* Clear DTLBs */ | |
205 | pta clear_DTLB, tr1 | |
206 | movi MMUDR_FIRST, r21 | |
207 | movi MMUDR_END, r22 /* exclusive end, as for the ITLB loop */ | |
208 | clear_DTLB: | |
209 | putcfg r21, 0, ZERO /* Clear MMUDR[n].PTEH.V */ | |
210 | addi r21, MMUDR_STEP, r21 | |
211 | bne r21, r22, tr1 | |
212 | ||
213 | /* Map one big (512Mb) page for ITLB */ | |
214 | movi MMUIR_FIRST, r21 | |
215 | movi MMUIR_TEXT_L, r22 /* PTEL first */ | |
216 | add.l r22, r63, r22 /* Sign extend */ | |
217 | putcfg r21, 1, r22 /* Set MMUIR[0].PTEL */ | |
218 | movi MMUIR_TEXT_H, r22 /* PTEH last */ | |
219 | add.l r22, r63, r22 /* Sign extend */ | |
220 | putcfg r21, 0, r22 /* Set MMUIR[0].PTEH (writing PTEH second makes the entry valid only once PTEL is in place) */ | |
221 | ||
222 | /* Map one big CACHED (512Mb) page for DTLB */ | |
223 | movi MMUDR_FIRST, r21 | |
224 | movi MMUDR_CACHED_L, r22 /* PTEL first */ | |
225 | add.l r22, r63, r22 /* Sign extend */ | |
226 | putcfg r21, 1, r22 /* Set MMUDR[0].PTEL */ | |
227 | movi MMUDR_CACHED_H, r22 /* PTEH last */ | |
228 | add.l r22, r63, r22 /* Sign extend */ | |
229 | putcfg r21, 0, r22 /* Set MMUDR[0].PTEH */ | |
230 | ||
231 | #ifdef CONFIG_EARLY_PRINTK | |
232 | /* | |
233 | * Setup a DTLB translation for SCIF phys. | |
234 | */ | |
235 | addi r21, MMUDR_STEP, r21 /* r21 still points past MMUDR[0]: use the next DTLB slot */ | |
236 | movi 0x0a03, r22 /* SCIF phys */ | |
237 | shori 0x0148, r22 /* r22 = 0x0a030148: shori shifts left 16 then ORs — phys base plus PTEL flags */ | |
238 | putcfg r21, 1, r22 /* PTEL first */ | |
239 | movi 0xfa03, r22 /* 0xfa030000, fixed SCIF virt */ | |
240 | shori 0x0003, r22 /* r22 = 0xfa030003: virt base plus PTEH flags (compare bit 0/1 of MMUDR_CACHED_H) */ | |
241 | putcfg r21, 0, r22 /* PTEH last */ | |
242 | #endif | |
243 | ||
244 | /* | |
245 | * Set cache behaviours. | |
246 | */ | |
247 | /* ICache */ | |
248 | movi ICCR_BASE, r21 | |
249 | movi ICCR0_INIT_VAL, r22 | |
250 | movi ICCR1_INIT_VAL, r23 | |
251 | putcfg r21, ICCR_REG0, r22 | |
252 | putcfg r21, ICCR_REG1, r23 | |
253 | ||
254 | /* OCache */ | |
255 | movi OCCR_BASE, r21 | |
256 | movi OCCR0_INIT_VAL, r22 | |
257 | movi OCCR1_INIT_VAL, r23 | |
258 | putcfg r21, OCCR_REG0, r22 | |
259 | putcfg r21, OCCR_REG1, r23 | |
260 | ||
261 | ||
262 | /* | |
263 | * Enable Caches and MMU. Do the first non-PIC jump. | |
264 | * Now head.S global variables, constants and externs | |
265 | * can be used. | |
266 | */ | |
267 | getcon SR, r21 | |
268 | movi SR_ENABLE_MMU, r22 | |
269 | or r21, r22, r21 /* new SR with the MMU bit(s) set ... */ | |
270 | putcon r21, SSR /* ... staged in SSR so it takes effect at rte */ | |
271 | movi hyperspace, r22 | |
272 | ori r22, 1, r22 /* Make it SHmedia, not required but..*/ | |
273 | putcon r22, SPC /* resume PC for rte */ | |
274 | synco | |
275 | rte /* And now go into the hyperspace ... */ | |
276 | hyperspace: /* ... that's the next instruction ! */ | |
277 | ||
278 | /* | |
279 | * Set CPU to a consistent state. | |
280 | * r31 = FPU support flag | |
281 | * tr0/tr7 in use. Others give a chance to loop somewhere safe | |
282 | */ | |
283 | movi start_kernel, r32 | |
284 | ori r32, 1, r32 /* bit 0 set = SHmedia target, as above */ | |
285 | ||
286 | ptabs r32, tr0 /* r32 = _start_kernel address */ | |
287 | pta/u hopeless, tr1 /* park every other target register on the fail-safe loop */ | |
288 | pta/u hopeless, tr2 | |
289 | pta/u hopeless, tr3 | |
290 | pta/u hopeless, tr4 | |
291 | pta/u hopeless, tr5 | |
292 | pta/u hopeless, tr6 | |
293 | pta/u hopeless, tr7 | |
294 | gettr tr1, r28 /* r28 = hopeless address */ | |
295 | ||
296 | /* Set initial stack pointer */ | |
297 | movi init_thread_union, SP | |
298 | putcon SP, KCR0 /* Set current to init_task */ | |
299 | movi THREAD_SIZE, r22 /* Point to the end */ | |
300 | add SP, r22, SP /* SP = init_thread_union + THREAD_SIZE (stack grows down) */ | |
301 | ||
302 | /* | |
303 | * Initialize FPU. | |
304 | * Keep FPU flag in r31. After this block: | |
305 | * r31 = FPU flag | |
306 | */ | |
307 | movi fpu_in_use, r31 /* Temporary */ | |
308 | ||
309 | #ifdef CONFIG_SH_FPU | |
310 | getcon SR, r21 | |
311 | movi SR_ENABLE_FPU, r22 | |
312 | and r21, r22, r22 /* presumably masks off SR.FD — TODO confirm SR_ENABLE_FPU definition */ | |
313 | putcon r22, SR /* Try to enable */ | |
314 | getcon SR, r22 | |
315 | xor r21, r22, r21 /* r21 = bits that actually changed */ | |
316 | shlri r21, 15, r21 /* Supposedly 0/1 — bit 15 is presumably SR.FD; nonzero iff the enable took effect */ | |
317 | st.q r31, 0 , r21 /* Set fpu_in_use */ | |
318 | #else | |
319 | movi 0, r21 | |
320 | st.q r31, 0 , r21 /* Set fpu_in_use */ | |
321 | #endif | |
322 | or r21, ZERO, r31 /* Set FPU flag at last */ | |
323 | ||
324 | #ifndef CONFIG_SH_NO_BSS_INIT | |
325 | /* Don't clear BSS if running on slow platforms such as an RTL simulation, | |
326 | remote memory via SHdebug link, etc. For these the memory can be guaranteed | |
327 | to be all zero on boot anyway. */ | |
328 | /* | |
329 | * Clear bss | |
330 | */ | |
331 | pta clear_quad, tr1 | |
332 | movi __bss_start, r22 | |
333 | movi _end, r23 | |
334 | clear_quad: | |
335 | st.q r22, 0, ZERO /* zero 8 bytes per iteration */ | |
336 | addi r22, 8, r22 | |
337 | bne r22, r23, tr1 /* Both quad aligned, see vmlinux.lds.S */ | |
338 | #endif | |
339 | pta/u hopeless, tr1 /* restore tr1 to the fail-safe loop (it was used for clear_quad) */ | |
340 | ||
341 | /* Say bye to head.S but be prepared to wrongly get back ... */ | |
342 | blink tr0, LINK /* call start_kernel(); it should never return */ | |
343 | ||
344 | /* If we ever get back here through LINK/tr1-tr7 */ | |
345 | pta/u hopeless, tr7 | |
346 | ||
347 | hopeless: | |
348 | /* | |
349 | * Something's badly wrong here. Loop endlessly, | |
350 | * there's nothing more we can do about it. | |
351 | * | |
352 | * Note on hopeless: it can be jumped into invariably | |
353 | * before or after jumping into hyperspace. The only | |
354 | * requirement is to be PIC called (PTA) before and | |
355 | * any way (PTA/PTABS) after. According to Virtual | |
356 | * to Physical mapping a simulator/emulator can easily | |
357 | * tell where we came here from just looking at hopeless | |
358 | * (PC) address. | |
359 | * | |
360 | * For debugging purposes: | |
361 | * (r28) hopeless/loop address | |
362 | * (r29) Original SR | |
363 | * (r30) CPU type/Platform endianness | |
364 | * (r31) FPU Support | |
365 | * (r32) _start_kernel address | |
366 | */ | |
367 | blink tr7, ZERO |