/*
 * X86-64 specific CPU setup.
 * Copyright (C) 1995 Linus Torvalds
 * Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen.
 * See setup.c for older changelog.
 * $Id: setup64.c,v 1.12 2002/03/21 10:09:17 ak Exp $
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <asm/bootsetup.h>
#include <asm/pda.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/i387.h>
#include <asm/percpu.h>
#include <asm/proto.h>
#include <asm/sections.h>

char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,};

cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;

struct x8664_pda cpu_pda[NR_CPUS] __cacheline_aligned;

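/* IDT descriptor for lidt: the x86-64 IDT holds 256 gates of 16 bytes each. */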
struct desc_ptr idt_descr = { 256 * 16, (unsigned long) idt_table };

char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));

unsigned long __supported_pte_mask __read_mostly = ~0UL;
static int do_not_nx __initdata = 0;

/* noexec=on|off
Control non-executable mappings for 64-bit processes.

on	Enable (default)
off	Disable
*/
int __init nonx_setup(char *str)
{
	if (!strncmp(str, "on", 2)) {
		__supported_pte_mask |= _PAGE_NX;
		do_not_nx = 0;
	} else if (!strncmp(str, "off", 3)) {
		do_not_nx = 1;
		__supported_pte_mask &= ~_PAGE_NX;
	}
	return 0;
}
__setup("noexec=", nonx_setup);	/* parsed early actually */

int force_personality32 = READ_IMPLIES_EXEC;

/* noexec32=on|off
Control non-executable heap for 32-bit processes.
To control the stack too, use noexec=off.

on	PROT_READ does not imply PROT_EXEC for 32-bit processes
off	PROT_READ implies PROT_EXEC (default)
*/
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 0;
}
__setup("noexec32=", nonx32_setup);

/*
 * Great future plan:
 * Declare the PDA itself and its support structures (irqstack, tss, pgd)
 * as per-cpu data. Always point %gs to its beginning.
 */
void __init setup_per_cpu_areas(void)
{
	int i;
	unsigned long size;

	/* Copy section for each CPU (we discard the original) */
	size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
#ifdef CONFIG_MODULES
	if (size < PERCPU_ENOUGH_ROOM)
		size = PERCPU_ENOUGH_ROOM;
#endif

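	/* Allocate each CPU's per-CPU area from its own NUMA node when the
	   node data is set up, falling back to plain bootmem otherwise. */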
	for_each_cpu_mask (i, cpu_possible_map) {
		char *ptr;

		if (!NODE_DATA(cpu_to_node(i))) {
			printk("cpu with no node %d, num_online_nodes %d\n",
			       i, num_online_nodes());
			ptr = alloc_bootmem(size);
		} else {
			ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
		}
		if (!ptr)
			panic("Cannot allocate cpu data for CPU %d\n", i);
		cpu_pda[i].data_offset = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
	}
}

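/*
 * Set up this CPU's PDA and point MSR_GS_BASE at it, so that
 * %gs-relative PDA accesses work as early as possible.
 */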
void pda_init(int cpu)
{
	struct x8664_pda *pda = &cpu_pda[cpu];

	/* Set up data that may be needed in __get_free_pages early */
	asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
	wrmsrl(MSR_GS_BASE, cpu_pda + cpu);

	pda->cpunumber = cpu;
	pda->irqcount = -1;
	pda->kernelstack =
		(unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE;
	pda->active_mm = &init_mm;
	pda->mmu_state = 0;

	if (cpu == 0) {
		/* others are initialized in smpboot.c */
		pda->pcurrent = &init_task;
		pda->irqstackptr = boot_cpu_stack;
	} else {
		pda->irqstackptr = (char *)
			__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
		if (!pda->irqstackptr)
			panic("cannot allocate irqstack for cpu %d", cpu);
	}

	asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));

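	/* The IRQ stack grows down, so record its top, minus a small
	   64-byte gap, as the starting stack pointer. */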
	pda->irqstackptr += IRQSTACKSIZE-64;
}

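/*
 * Statically allocated exception (IST) stacks for the boot CPU;
 * secondary CPUs allocate theirs dynamically in cpu_init().
 */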
char boot_exception_stacks[N_EXCEPTION_STACKS * EXCEPTION_STKSZ]
__attribute__((section(".bss.page_aligned")));

/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
	/*
	 * LSTAR and STAR live in a somewhat strange symbiosis.
	 * They both write to the same internal register. STAR allows
	 * setting CS/DS, but only a 32-bit target; LSTAR sets the
	 * 64-bit rip.
	 */
	wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
	wrmsrl(MSR_LSTAR, system_call);

#ifdef CONFIG_IA32_EMULATION
	syscall32_cpu_init();
#endif

	/* Flags to clear on syscall: TF, DF, IF and both IOPL bits (0x3000) */
	wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000);
}

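/*
 * Drop _PAGE_NX from the set of PTE bits the kernel may use if the
 * CPU lacks EFER.NX support or noexec=off was given on the command line.
 */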
void __cpuinit check_efer(void)
{
	unsigned long efer;

	rdmsrl(MSR_EFER, efer);
	if (!(efer & EFER_NX) || do_not_nx) {
		__supported_pte_mask &= ~_PAGE_NX;
	}
}

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
 * and IDT. We reload them nevertheless; this function acts as a
 * 'CPU state barrier' that nothing should get across.
 * A lot of state is already set up in PDA init.
 */
void __cpuinit cpu_init (void)
{
	int cpu = stack_smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);
	unsigned long v;
	char *estacks = NULL;
	struct task_struct *me;
	int i;

	/* CPU 0 is initialised in head64.c */
	if (cpu != 0) {
		pda_init(cpu);
	} else
		estacks = boot_exception_stacks;

	me = current;

	if (cpu_test_and_set(cpu, cpu_initialized))
		panic("CPU#%d already initialized!\n", cpu);

	printk("Initializing CPU#%d\n", cpu);

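	/* Clear legacy CR4 features: vm86 extensions, virtual interrupts,
	   user-mode RDTSC restriction and debugging extensions. */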
	clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	/*
	 * Initialize the per-CPU GDT with the boot GDT,
	 * and set up the GDT descriptor:
	 */
	if (cpu) {
		memcpy(cpu_gdt_table[cpu], cpu_gdt_table[0], GDT_SIZE);
	}

	cpu_gdt_descr[cpu].size = GDT_SIZE;
	cpu_gdt_descr[cpu].address = (unsigned long)cpu_gdt_table[cpu];
	asm volatile("lgdt %0" :: "m" (cpu_gdt_descr[cpu]));
	asm volatile("lidt %0" :: "m" (idt_descr));

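	/* Seed the boot task's cached TLS descriptors from the freshly
	   loaded GDT so the two stay consistent. */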
	memcpy(me->thread.tls_array, cpu_gdt_table[cpu], GDT_ENTRY_TLS_ENTRIES * 8);

	/*
	 * Clear EFLAGS.NT (the Nested Task flag, bit 14)
	 */
	asm volatile("pushfq ; popq %%rax ; btr $14,%%rax ; pushq %%rax ; popfq" ::: "eax");

	syscall_init();

	wrmsrl(MSR_FS_BASE, 0);
	wrmsrl(MSR_KERNEL_GS_BASE, 0);
	barrier();

	check_efer();

	/*
	 * set up and load the per-CPU TSS
	 */
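	/* Each IST slot must point at the *top* of its exception stack
	   (stacks grow down), hence estacks is advanced by EXCEPTION_STKSZ
	   before being recorded. */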
	for (v = 0; v < N_EXCEPTION_STACKS; v++) {
		if (cpu) {
			estacks = (char *)__get_free_pages(GFP_ATOMIC,
							   EXCEPTION_STACK_ORDER);
			if (!estacks)
				panic("Cannot allocate exception stack %ld %d\n",
				      v, cpu);
		}
		estacks += EXCEPTION_STKSZ;
		t->ist[v] = (unsigned long)estacks;
	}

	t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
	/*
	 * <= is required because the CPU will access up to
	 * 8 bits beyond the end of the IO permission bitmap.
	 */
	for (i = 0; i <= IO_BITMAP_LONGS; i++)
		t->io_bitmap[i] = ~0UL;

	atomic_inc(&init_mm.mm_count);
	me->active_mm = &init_mm;
	if (me->mm)
		BUG();
	enter_lazy_tlb(&init_mm, me);

	set_tss_desc(cpu, t);
	load_TR_desc();
	load_LDT(&init_mm.context);

	/*
	 * Clear all 6 debug registers:
	 */
	set_debug(0UL, 0);
	set_debug(0UL, 1);
	set_debug(0UL, 2);
	set_debug(0UL, 3);
	set_debug(0UL, 6);
	set_debug(0UL, 7);

	fpu_init();
}