* enable), so that any CPU's that boot up
* after us can get the correct flags.
*/
-extern unsigned long mmu_cr4_features;
+extern unsigned long mmu_cr4_features;
+extern u32 *trampoline_cr4_features;
static inline void set_in_cr4(unsigned long mask)
{
unsigned long cr4;
mmu_cr4_features |= mask;
+ /* Mirror the updated CR4 feature mask into the trampoline header
+  * (once setup_real_mode() has published the pointer) so CPUs that
+  * boot through the trampoline later start with the same CR4. */
+ if (trampoline_cr4_features)
+ *trampoline_cr4_features = mmu_cr4_features;
cr4 = read_cr4();
cr4 |= mask;
write_cr4(cr4);
unsigned long cr4;
mmu_cr4_features &= ~mask;
+ /* Keep the trampoline's CR4 copy in sync on clear as well. */
+ if (trampoline_cr4_features)
+ *trampoline_cr4_features = mmu_cr4_features;
cr4 = read_cr4();
cr4 &= ~mask;
write_cr4(cr4);
#ifdef CONFIG_X86_32
u32 machine_real_restart_asm;
#endif
-} __attribute__((__packed__));
+};
/* This must match data at trampoline_32/64.S */
struct trampoline_header {
#ifdef CONFIG_X86_32
u32 start;
+ /* Explicit padding keeps the layout identical to the assembly
+  * data block without __attribute__((packed)). */
+ u16 gdt_pad;
u16 gdt_limit;
u32 gdt_base;
#else
u64 start;
+ /* CR4 and EFER values the trampoline loads on AP startup; cr4 is
+  * kept current via the trampoline_cr4_features pointer. */
+ u32 cr4;
+ u32 efer_low;
+ u32 efer_high;
#endif
-} __attribute__((__packed__));
+};
extern struct real_mode_header *real_mode_header;
extern unsigned char real_mode_blob_end[];
#include <asm/realmode.h>
struct real_mode_header *real_mode_header;
+/* Points at trampoline_header->cr4 once the real-mode blob has been set
+ * up; NULL until then (checked by set_in_cr4()/clear_in_cr4()). */
+u32 *trampoline_cr4_features;
void __init setup_real_mode(void)
{
trampoline_header->gdt_limit = __BOOT_DS + 7;
trampoline_header->gdt_base = __pa(boot_gdt);
#else
+ /* Snapshot the boot CPU's EFER for the trampoline to restore on
+  * secondary CPUs; EFER must be readable on any 64-bit-capable CPU,
+  * so a failed read is a hard bug.
+  * NOTE(review): EFER is read here while running in long mode, so
+  * the saved value has EFER.LMA set; confirm the trampoline's wrmsr
+  * tolerates writing LMA before paging is enabled, or mask it out. */
+ if (rdmsr_safe(MSR_EFER, &trampoline_header->efer_low,
+ &trampoline_header->efer_high))
+ BUG();
+
trampoline_header->start = (u64) secondary_startup_64;
+ /* Publish the CR4 slot so subsequent set_in_cr4()/clear_in_cr4()
+  * calls are mirrored into the trampoline, then seed it. */
+ trampoline_cr4_features = &trampoline_header->cr4;
+ *trampoline_cr4_features = read_cr4();
+
trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
trampoline_pgd[0] = __pa(level3_ident_pgt) + _KERNPG_TABLE;
trampoline_pgd[511] = __pa(level3_kernel_pgt) + _KERNPG_TABLE;
if (boot_cpu_data.cpuid_level >= 0) {
/* A CPU has %cr4 if and only if it has CPUID */
mmu_cr4_features = read_cr4();
+ /* Propagate the freshly-read CR4 to the trampoline copy too. */
+ if (trampoline_cr4_features)
+ *trampoline_cr4_features = mmu_cr4_features;
}
#ifdef CONFIG_X86_32
.section ".header", "a"
+ .balign 16
GLOBAL(real_mode_header)
.long pa_text_start
.long pa_ro_end
#include "realmode.h"
.text
- .balign PAGE_SIZE
.code16
+ .balign PAGE_SIZE
ENTRY(trampoline_start)
cli # We should be safe anyway
wbinvd
* to 32 bit.
*/
- lidtl tidt # load idt with 0, 0
- lgdtl tgdt # load gdt with whatever is appropriate
+ # tr_idt/tr_gdt now live in trampoline_common.S (shared naming)
+ lidtl tr_idt # load idt with 0, 0
+ lgdtl tr_gdt # load gdt with whatever is appropriate
movw $__KERNEL_DS, %dx # Data segment descriptor
movl %edx, %fs
movl %edx, %gs
- movl $X86_CR4_PAE, %eax
+ # Load the CR4 value saved by setup_real_mode() instead of
+ # hard-coding PAE only, so AP CR4 matches the boot CPU
+ movl pa_tr_cr4, %eax
movl %eax, %cr4 # Enable PAE mode
# Setup trampoline 4 level pagetables
movl $pa_trampoline_pgd, %eax
movl %eax, %cr3
+ # Set up EFER
+ # (restore the value setup_real_mode() saved from the boot CPU)
+ movl pa_tr_efer, %eax
+ movl pa_tr_efer + 4, %edx
movl $MSR_EFER, %ecx
- movl $((1 << _EFER_LME) | (1 << _EFER_NX)), %eax # Enable Long Mode
- xorl %edx, %edx
wrmsr
# Enable paging and in turn activate Long Mode
# Now jump into the kernel using virtual addresses
jmpq *tr_start(%rip)
- .section ".rodata","a"
- .balign 16
-tidt:
- .word 0 # idt limit = 0
- .word 0, 0 # idt base = 0L
-
- # Duplicate the global descriptor table
- # so the kernel can live anywhere
- .balign 16
- .globl tgdt
-tgdt:
- .short tgdt_end - tgdt - 1 # gdt limit
- .long pa_tgdt
- .short 0
- .quad 0x00cf9b000000ffff # __KERNEL32_CS
- .quad 0x00af9b000000ffff # __KERNEL_CS
- .quad 0x00cf93000000ffff # __KERNEL_DS
-tgdt_end:
-
#include "trampoline_common.S"
.section ".rodata","a"
+#ifdef CONFIG_X86_64
+ # Duplicate the global descriptor table
+ # so the kernel can live anywhere
+ # (moved here from trampoline_64.S and renamed tgdt -> tr_gdt)
+ .balign 16
+ .globl tr_gdt
+tr_gdt:
+ .short tr_gdt_end - tr_gdt - 1 # gdt limit
+ .long pa_tr_gdt
+ .short 0
+ .quad 0x00cf9b000000ffff # __KERNEL32_CS
+ .quad 0x00af9b000000ffff # __KERNEL_CS
+ .quad 0x00cf93000000ffff # __KERNEL_DS
+tr_gdt_end:
+#endif
+
.balign 4
tr_idt: .fill 1, 6, 0
.balign 4
GLOBAL(trampoline_status) .space 4
+ # 8-byte alignment so the 64-bit tr_start field is naturally aligned;
+ # this layout must match struct trampoline_header in realmode.h
+ .balign 8
GLOBAL(trampoline_header)
#ifdef CONFIG_X86_32
tr_start: .space 4
+ # explicit pad, mirrors the gdt_pad member on the C side
+ tr_gdt_pad: .space 2
tr_gdt: .space 6
#else
tr_start: .space 8
+ # cr4/efer slots filled in by setup_real_mode()
+ GLOBAL(tr_cr4) .space 4
+ GLOBAL(tr_efer) .space 8
#endif
END(trampoline_header)