# arch/x86/kernel/trampoline_64.S
# (source recovered from a git-blame web view; per-line commit annotations
#  and fused line numbers have been removed)
/*
 *	Trampoline.S	Derived from Setup.S by Linus Torvalds
 *
 *	4 Jan 1997 Michael Chastain: changed to gnu as.
 *	15 Sept 2005 Eric Biederman: 64bit PIC support
 *
 *	Entry: CS:IP point to the start of our code, we are
 *	in real mode with no stack, but the rest of the
 *	trampoline page to make our stack and everything else
 *	is a mystery.
 *
 *	In fact we don't actually need a stack so we don't
 *	set one up.
 *
 *	On entry to trampoline_data, the processor is in real mode
 *	with 16-bit addressing and 16-bit data.  CS has some value
 *	and IP is zero.  Thus, data addresses need to be absolute
 *	(no relocation) and are taken with regard to r_base.
 *
 *	With the addition of trampoline_level4_pgt this code can
 *	now enter a 64bit kernel that lives at arbitrary 64bit
 *	physical addresses.
 *
 *	If you work on this file, check the object module with objdump
 *	--full-contents --reloc to make sure there are no relocation
 *	entries.
 */
#include <linux/linkage.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/segment.h>

/* We can free up trampoline after bootup if cpu hotplug is not supported. */
#ifndef CONFIG_HOTPLUG_CPU
.section .init.data, "aw", @progbits
#else
.section .rodata, "a", @progbits
#endif

.code16

/*
 * trampoline_data - real-mode entry point for a secondary CPU.
 *
 * The CPU arrives here in 16-bit real mode with CS:IP = start of this
 * code and IP = 0, so all data references are taken relative to r_base
 * (absolute within the trampoline page; no relocations allowed).
 */
ENTRY(trampoline_data)
r_base = .
	cli			# We should be safe anyway
	wbinvd
	mov	%cs, %ax	# Code and data in the same place
	mov	%ax, %ds
	mov	%ax, %es
	mov	%ax, %ss

	movl	$0xA5A5A5A5, trampoline_data - r_base
				# write marker for master knows we're running

	# Setup stack (grows down from the end of the trampoline page)
	movw	$(trampoline_stack_end - r_base), %sp

	call	verify_cpu		# Verify the cpu supports long mode
	testl	%eax, %eax		# Check for return code
	jnz	no_longmode

	mov	%cs, %ax
	movzx	%ax, %esi		# Find the 32bit trampoline location
	shll	$4, %esi		# %esi = physical load address of this page

	# Fixup the vectors: patch the physical load address into the
	# far-jump targets and the gdt base (must be absolute addresses)
	addl	%esi, startup_32_vector - r_base
	addl	%esi, startup_64_vector - r_base
	addl	%esi, tgdt + 2 - r_base	# Fixup the gdt pointer

	/*
	 * GDT tables in non default location kernel can be beyond 16MB and
	 * lgdt will not be able to load the address as in real mode default
	 * operand size is 16bit. Use lgdtl instead to force operand size
	 * to 32 bit.
	 */
	lidtl	tidt - r_base	# load idt with 0, 0
	lgdtl	tgdt - r_base	# load gdt with whatever is appropriate

	xor	%ax, %ax
	inc	%ax		# protected mode (PE) bit
	lmsw	%ax		# into protected mode (lmsw sets CR0.PE only)

	# flush prefetch and jump to startup_32
	ljmpl	*(startup_32_vector - r_base)

91 .code32
92 .balign 4
93startup_32:
94 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
95 movl %eax, %ds
96
97 xorl %eax, %eax
98 btsl $5, %eax # Enable PAE mode
99 movl %eax, %cr4
100
101 # Setup trampoline 4 level pagetables
102 leal (trampoline_level4_pgt - r_base)(%esi), %eax
103 movl %eax, %cr3
104
105 movl $MSR_EFER, %ecx
106 movl $(1 << _EFER_LME), %eax # Enable Long Mode
107 xorl %edx, %edx
108 wrmsr
109
110 xorl %eax, %eax
111 btsl $31, %eax # Enable paging and in turn activate Long Mode
112 btsl $0, %eax # Enable protected mode
113 movl %eax, %cr0
114
115 /*
116 * At this point we're in long mode but in 32bit compatibility mode
117 * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
118 * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we use
119 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
120 */
121 ljmp *(startup_64_vector - r_base)(%esi)
122
123 .code64
124 .balign 4
125startup_64:
126 # Now jump into the kernel using virtual addresses
127 movq $secondary_startup_64, %rax
128 jmp *%rax
129
130 .code16
90b1c208
VG
131no_longmode:
132 hlt
133 jmp no_longmode
e0a84f68 134#include "verify_cpu_64.S"
1da177e4
LT
135
136 # Careful these need to be in the same 64K segment as the above;
90b1c208 137tidt:
1da177e4
LT
138 .word 0 # idt limit = 0
139 .word 0, 0 # idt base = 0L
140
90b1c208
VG
141 # Duplicate the global descriptor table
142 # so the kernel can live anywhere
143 .balign 4
144tgdt:
145 .short tgdt_end - tgdt # gdt limit
146 .long tgdt - r_base
147 .short 0
148 .quad 0x00cf9b000000ffff # __KERNEL32_CS
149 .quad 0x00af9b000000ffff # __KERNEL_CS
150 .quad 0x00cf93000000ffff # __KERNEL_DS
151tgdt_end:
152
153 .balign 4
154startup_32_vector:
155 .long startup_32 - r_base
156 .word __KERNEL32_CS, 0
157
158 .balign 4
159startup_64_vector:
160 .long startup_64 - r_base
161 .word __KERNEL_CS, 0
162
163trampoline_stack:
164 .org 0x1000
165trampoline_stack_end:
166ENTRY(trampoline_level4_pgt)
167 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
168 .fill 510,8,0
169 .quad level3_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
1da177e4 170
90b1c208 171ENTRY(trampoline_end)