Linux-2.6.12-rc2
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / arch / x86_64 / kernel / head.S
1 /*
2 * linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
3 *
4 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
5 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
6 * Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
7 * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
8 *
9 * $Id: head.S,v 1.49 2002/03/19 17:39:25 ak Exp $
10 */
11
12
13 #include <linux/linkage.h>
14 #include <linux/threads.h>
15 #include <asm/desc.h>
16 #include <asm/segment.h>
17 #include <asm/page.h>
18 #include <asm/msr.h>
19 #include <asm/cache.h>
20
21 /* we are not able to switch in one step to the final KERNEL ADDRESS SPACE
22 * because we need identity-mapped pages on setup so define __START_KERNEL to
23 * 0x100000 for this stage
24 *
25 */
26
27 .text
28 .code32
29 .globl startup_32
30 /* %bx: 1 if coming from smp trampoline on secondary cpu */
31 startup_32:
32
33 /*
34 * At this point the CPU runs in 32bit protected mode (CS.D = 1) with
35 * paging disabled and the point of this file is to switch to 64bit
36 * long mode with a kernel mapping for the kernel and to jump into the
37 * kernel virtual addresses.
38 * There is no stack until we set one up.
39 */
40
41 /* Initialize the %ds segment register */
42 movl $__KERNEL_DS,%eax
43 movl %eax,%ds
44
45 /* Load new GDT with the 64bit segments using 32bit descriptor */
   /* pGDT32 is referenced by its physical address: paging is off, so the
    * kernel-virtual mapping at __START_KERNEL_map is not usable yet. */
46 lgdt pGDT32 - __START_KERNEL_map
47
48 /* If the CPU doesn't support CPUID this will double fault.
49 * Unfortunately it is hard to check for CPUID without a stack.
50 */
51
52 /* Check if extended functions are implemented */
53 movl $0x80000000, %eax
54 cpuid
   /* If the maximum extended CPUID leaf is <= 0x80000000, then leaf
    * 0x80000001 (which carries the long-mode flag) does not exist. */
55 cmpl $0x80000000, %eax
56 jbe no_long_mode
57 /* Check if long mode is implemented */
58 mov $0x80000001, %eax
59 cpuid
60 btl $29, %edx /* CPUID 0x80000001: EDX bit 29 = Long Mode supported */
61 jnc no_long_mode
62
63 /*
64 * Prepare for entering 64bits mode
65 */
66
67 /* Enable PAE mode */
68 xorl %eax, %eax
69 btsl $5, %eax /* CR4.PAE is bit 5; PAE is a prerequisite for long mode */
70 movl %eax, %cr4
71
72 /* Setup early boot stage 4 level pagetables */
   /* CR3 must hold the physical address of the top-level table. */
73 movl $(init_level4_pgt - __START_KERNEL_map), %eax
74 movl %eax, %cr3
75
76 /* Setup EFER (Extended Feature Enable Register) */
77 movl $MSR_EFER, %ecx
78 rdmsr
79
80 /* Enable Long Mode */
81 btsl $_EFER_LME, %eax
82
83 /* Make changes effective */
84 wrmsr
85
86 xorl %eax, %eax
87 btsl $31, %eax /* Enable paging and in turn activate Long Mode */
88 btsl $0, %eax /* Enable protected mode */
89 /* Make changes effective */
90 movl %eax, %cr0
91 /*
92 * At this point we're in long mode but in 32bit compatibility mode
93 * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
94 * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we use
95 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
96 */
97 ljmp $__KERNEL_CS, $(startup_64 - __START_KERNEL_map)
98
99 .code64
100 .org 0x100
101 .globl startup_64
102 startup_64:
103 /* We come here either from startup_32
104 * or directly from a 64bit bootloader.
105 * Since we may have come directly from a bootloader we
106 * reload the page tables here.
107 */
108
109 /* Enable PAE mode and PGE */
110 xorq %rax, %rax
111 btsq $5, %rax /* CR4.PAE (bit 5) */
112 btsq $7, %rax /* CR4.PGE (bit 7) - global pages */
113 movq %rax, %cr4
114
115 /* Setup early boot stage 4 level pagetables. */
116 movq $(init_level4_pgt - __START_KERNEL_map), %rax
117 movq %rax, %cr3
118
119 /* Check if nx is implemented */
120 movl $0x80000001, %eax
121 cpuid
122 movl %edx,%edi /* save feature flags; cpuid clobbers eax-edx */
123
124 /* Setup EFER (Extended Feature Enable Register) */
125 movl $MSR_EFER, %ecx
126 rdmsr
127
128 /* Enable System Call */
129 btsl $_EFER_SCE, %eax
130
131 /* No Execute supported? */
132 btl $20,%edi /* CPUID 0x80000001: EDX bit 20 = NX supported */
133 jnc 1f
134 btsl $_EFER_NX, %eax
135 1:
136 /* Make changes effective */
137 wrmsr
138
139 /* Setup cr0 */
140 xorq %rax, %rax
141 btsq $31, %rax /* Enable paging */
142 btsq $0, %rax /* Enable protected mode */
143 btsq $1, %rax /* Enable MP */
144 btsq $4, %rax /* Enable ET */
145 btsq $5, %rax /* Enable NE */
146 btsq $16, %rax /* Enable WP */
147 btsq $18, %rax /* Enable AM */
148 /* Make changes effective */
149 movq %rax, %cr0
150
151 /* Setup a boot time stack */
152 movq init_rsp(%rip),%rsp
153
154 /* zero EFLAGS after setting rsp */
155 pushq $0
156 popfq
157
158 /*
159 * We must switch to a new descriptor in kernel space for the GDT
160 * because soon the kernel won't have access anymore to the userspace
161 * addresses where we're currently running on. We have to do that here
162 * because in 32bit we couldn't load a 64bit linear address.
163 */
164 lgdt cpu_gdt_descr
165
166 /*
167 * Set up a dummy PDA. this is just for some early bootup code
168 * that does in_interrupt()
169 */
170 movl $MSR_GS_BASE,%ecx
171 movq $empty_zero_page,%rax
   /* wrmsr takes the 64bit value split across %edx:%eax, so split
    * the address: high half into %edx, low half stays in %eax. */
172 movq %rax,%rdx
173 shrq $32,%rdx
174 wrmsr
175
176 /* set up data segments. actually 0 would do too */
177 movl $__KERNEL_DS,%eax
178 movl %eax,%ds
179 movl %eax,%ss
180 movl %eax,%es
181
182 /* esi is pointer to real mode structure with interesting info.
183 pass it to C */
   /* %rdi is the first argument register in the 64bit C calling convention */
184 movl %esi, %edi
185
186 /* Finally jump to run C code and to be on real kernel address
187 * Since we are running on identity-mapped space we have to jump
188 * to the full 64bit address , this is only possible as indirect
189 * jump
190 */
191 movq initial_code(%rip),%rax
192 jmp *%rax
193
194 /* SMP bootup changes these two */
    /* initial_code: full 64bit address of the first C function to run
     * (jumped to indirectly at the end of startup_64). */
195 .globl initial_code
196 initial_code:
197 .quad x86_64_start_kernel
    /* init_rsp: initial stack pointer, loaded into %rsp by startup_64.
     * Points just below the top of the boot thread's stack area. */
198 .globl init_rsp
199 init_rsp:
200 .quad init_thread_union+THREAD_SIZE-8
201
    /* Common handler for exceptions taken before the real IDT is set up:
     * print a panic message with rip, error code and cr2, then halt.
     * Arguments are marshalled to match early_idt_msg's three %lx fields:
     * %rdi = format, %rsi = rip, %rdx = error code, %rcx = cr2. */
202 ENTRY(early_idt_handler)
203 xorl %eax,%eax /* NOTE(review): presumably clears %al (no vector args) for the variadic call - confirm early_printk's convention */
204 movq 8(%rsp),%rsi # get rip
205 movq (%rsp),%rdx /* error code pushed by the exception */
206 movq %cr2,%rcx /* faulting address, if this was a page fault */
207 leaq early_idt_msg(%rip),%rdi
208 call early_printk
209 1: hlt
210 jmp 1b /* halt forever; nothing can recover this early */
211
212 early_idt_msg:
213 .asciz "PANIC: early exception rip %lx error %lx cr2 %lx\n"
214
215 .code32
216 ENTRY(no_long_mode)
217 /* This isn't an x86-64 CPU so hang */
218 1:
219 jmp 1b
220
    /* 32bit GDT descriptor (limit word + 32bit base) used by the lgdt in
     * startup_32. The base is stored as a physical address since paging
     * is off at that point.
     * NOTE(review): lgdt conventionally takes limit = size - 1; this
     * stores the full size - verify whether the off-by-one is intended. */
221 .org 0xf00
222 .globl pGDT32
223 pGDT32:
224 .word gdt_end-cpu_gdt_table
225 .long cpu_gdt_table-__START_KERNEL_map
226
    /* Far-pointer (offset, selector) to startup_64, at a fixed offset. */
227 .org 0xf10
228 ljumpvector:
229 .long startup_64-__START_KERNEL_map
230 .word __KERNEL_CS
231
232 ENTRY(stext)
233 ENTRY(_stext)
234
235 /*
236 * This default setting generates an ident mapping at address 0x100000
237 * and a mapping for the kernel that precisely maps virtual address
238 * 0xffffffff80000000 to physical address 0x000000. (always using
239 * 2Mbyte large pages provided by PAE mode)
240 */
    /* Top-level (PML4) table. The pointed-to tables live at fixed .org
     * offsets in this file; the hard-coded physical addresses assume the
     * file is loaded at 0x100000 (see header comment). Low flag bits
     * 0x007 = present | writable | user. */
241 .org 0x1000
242 ENTRY(init_level4_pgt)
243 .quad 0x0000000000102007 /* -> level3_ident_pgt */
244 .fill 255,8,0
245 .quad 0x000000000010a007 /* entry 256 -> level3_physmem_pgt (.org 0xa000) */
246 .fill 254,8,0
247 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
248 .quad 0x0000000000103007 /* -> level3_kernel_pgt */
249
    /* Level 3 table for the identity mapping: only entry 0 is filled,
     * pointing at level2_ident_pgt (.org 0x4000, loaded at 0x104000). */
250 .org 0x2000
251 ENTRY(level3_ident_pgt)
252 .quad 0x0000000000104007
253 .fill 511,8,0
254
    /* Level 3 table for the kernel mapping at 0xffffffff80000000:
     * only entry 510 is filled (see the index computation below). */
255 .org 0x3000
256 ENTRY(level3_kernel_pgt)
257 .fill 510,8,0
258 /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
259 .quad 0x0000000000105007 /* -> level2_kernel_pgt */
260 .fill 1,8,0
261
262 .org 0x4000
263 ENTRY(level2_ident_pgt)
264 /* 40MB for bootup. */
    /* Each .quad is one 2MB large-page entry (flag bits 0x183 =
     * present | writable | PS (2MB page) | global); physical addresses
     * step by 0x200000. The first entry uses 0x283 instead - it lacks
     * the global bit and sets an OS-available bit; NOTE(review): the
     * reason is not visible here - confirm against mm code. */
265 .quad 0x0000000000000283
266 .quad 0x0000000000200183
267 .quad 0x0000000000400183
268 .quad 0x0000000000600183
269 .quad 0x0000000000800183
270 .quad 0x0000000000A00183
271 .quad 0x0000000000C00183
272 .quad 0x0000000000E00183
273 .quad 0x0000000001000183
274 .quad 0x0000000001200183
275 .quad 0x0000000001400183
276 .quad 0x0000000001600183
277 .quad 0x0000000001800183
278 .quad 0x0000000001A00183
279 .quad 0x0000000001C00183
280 .quad 0x0000000001E00183
281 .quad 0x0000000002000183
282 .quad 0x0000000002200183
283 .quad 0x0000000002400183
284 .quad 0x0000000002600183
285 /* Temporary mappings for the super early allocator in arch/x86_64/mm/init.c */
286 .globl temp_boot_pmds
287 temp_boot_pmds:
288 .fill 492,8,0
289
290 .org 0x5000
291 ENTRY(level2_kernel_pgt)
292 /* 40MB kernel mapping. The kernel code cannot be bigger than that.
293 When you change this change KERNEL_TEXT_SIZE in page.h too. */
294 /* (2^48-(2*1024*1024*1024)-((2^39)*511)-((2^30)*510)) = 0 */
    /* Twenty 2MB large-page entries mapping physical 0..40MB at the
     * kernel virtual base (flag bits 0x183 = present | writable |
     * PS (2MB page) | global). */
295 .quad 0x0000000000000183
296 .quad 0x0000000000200183
297 .quad 0x0000000000400183
298 .quad 0x0000000000600183
299 .quad 0x0000000000800183
300 .quad 0x0000000000A00183
301 .quad 0x0000000000C00183
302 .quad 0x0000000000E00183
303 .quad 0x0000000001000183
304 .quad 0x0000000001200183
305 .quad 0x0000000001400183
306 .quad 0x0000000001600183
307 .quad 0x0000000001800183
308 .quad 0x0000000001A00183
309 .quad 0x0000000001C00183
310 .quad 0x0000000001E00183
311 .quad 0x0000000002000183
312 .quad 0x0000000002200183
313 .quad 0x0000000002400183
314 .quad 0x0000000002600183
315 /* Module mapping starts here */
316 .fill 492,8,0
317
    /* Four page-sized, zero-filled placeholders; each .org reserves one
     * 0x1000 page between the labels. */
318 .org 0x6000
319 ENTRY(empty_zero_page)
320
321 .org 0x7000
322 ENTRY(empty_bad_page)
323
324 .org 0x8000
325 ENTRY(empty_bad_pte_table)
326
327 .org 0x9000
328 ENTRY(empty_bad_pmd_table)
329
330 .org 0xa000
331 ENTRY(level3_physmem_pgt)
332 .quad 0x0000000000105007 /* -> level2_kernel_pgt (so that __va works even before pagetable_init) */
333
    /* Page table used on resume from ACPI S3 sleep; its entries are
     * identical to init_level4_pgt above. */
334 .org 0xb000
335 #ifdef CONFIG_ACPI_SLEEP
336 ENTRY(wakeup_level4_pgt)
337 .quad 0x0000000000102007 /* -> level3_ident_pgt */
338 .fill 255,8,0
339 .quad 0x000000000010a007
340 .fill 254,8,0
341 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
342 .quad 0x0000000000103007 /* -> level3_kernel_pgt */
343 #endif
344
345 .data
346
    /* GDT descriptor (limit word + 64bit base) loaded by the lgdt in
     * startup_64. With CONFIG_SMP this becomes an array with one
     * descriptor slot per CPU; the extra slots are zeroed here -
     * NOTE(review): presumably filled in during SMP bringup, confirm. */
347 .align 16
348 .globl cpu_gdt_descr
349 cpu_gdt_descr:
350 .word gdt_end-cpu_gdt_table
351 gdt:
352 .quad cpu_gdt_table
353 #ifdef CONFIG_SMP
354 .rept NR_CPUS-1
355 .word 0
356 .quad 0
357 .endr
358 #endif
359
360 /* We need valid kernel segments for data and code in long mode too
361 * IRET will check the segment types kkeil 2000/10/28
362 * Also sysret mandates a special GDT layout
363 */
364
365 .align L1_CACHE_BYTES
366
367 /* The TLS descriptors are currently at a different place compared to i386.
368 Hopefully nobody expects them at a fixed place (Wine?) */
369
    /* Boot CPU's GDT. Each .quad is one 8-byte segment descriptor;
     * selector values (__KERNEL_CS etc.) index into this table and must
     * match asm/segment.h. TSS and LDT take two slots each (16-byte
     * descriptors in long mode). */
370 ENTRY(cpu_gdt_table)
371 .quad 0x0000000000000000 /* NULL descriptor */
372 .quad 0x008f9a000000ffff /* __KERNEL_COMPAT32_CS */
373 .quad 0x00af9a000000ffff /* __KERNEL_CS */
374 .quad 0x00cf92000000ffff /* __KERNEL_DS */
375 .quad 0x00cffa000000ffff /* __USER32_CS */
376 .quad 0x00cff2000000ffff /* __USER_DS, __USER32_DS */
377 .quad 0x00affa000000ffff /* __USER_CS */
378 .quad 0x00cf9a000000ffff /* __KERNEL32_CS */
379 .quad 0,0 /* TSS */
380 .quad 0,0 /* LDT */
381 .quad 0,0,0 /* three TLS descriptors */
382 .quad 0x00009a000000ffff /* __KERNEL16_CS - 16bit PM for S3 wakeup. */
383 /* base must be patched for real base address. */
384 gdt_end:
385 /* asm/segment.h:GDT_ENTRIES must match this */
386 /* This should be a multiple of the cache line size */
387 /* GDTs of other CPUs: */
388 .fill (GDT_SIZE * NR_CPUS) - (gdt_end - cpu_gdt_table)
389
    /* IDT: 256 zero-initialized 16-byte gate descriptors (long-mode
     * gates are 16 bytes, hence two .quads per entry). Populated later
     * by the trap-initialization code. */
.align L1_CACHE_BYTES
389 ENTRY(idt_table)
390 .rept 256
391 .quad 0
392 .quad 0
393 .endr
396