From: Glauber de Oliveira Costa
Date: Wed, 30 Jan 2008 12:31:31 +0000 (+0100)
Subject: x86: provide 64-bit with a load_sp0 function.
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=7818a1e0294debee02d5135e17b89f28b8871887;p=GitHub%2Fmt8127%2Fandroid_kernel_alcatel_ttab.git

x86: provide 64-bit with a load_sp0 function.

Paravirt guests need to inform the underlying hypervisor whenever the
sp0 tss field changes. i386 already has such a function, and we use it
for x86_64 too. The original version contains MSR handling that is
unnecessary for 64-bit, so that part is now placed inside an ifdef.
Since the function no longer makes sense in processor_32.h, it is moved
to the common header.

Signed-off-by: Glauber de Oliveira Costa
Signed-off-by: Ingo Molnar
Signed-off-by: Thomas Gleixner
---

diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index af56104b73ff..e3a3610ade10 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -639,7 +639,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	/*
 	 * Reload esp0, LDT and the page table pointer:
 	 */
-	tss->x86_tss.sp0 = next->sp0;
+	load_sp0(tss, next);
 
 	/*
 	 * Switch DS and ES.
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 2ea02a71b644..5bd42ce144da 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -614,7 +614,7 @@ do_rest:
 	start_rip = setup_trampoline();
 
 	init_rsp = c_idle.idle->thread.sp;
-	per_cpu(init_tss, cpu).x86_tss.sp0 = init_rsp;
+	load_sp0(&per_cpu(init_tss, cpu), &c_idle.idle->thread);
 	initial_code = start_secondary;
 	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index cede9ad3dc6e..b1ea52156362 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -193,8 +193,22 @@ static inline void native_set_iopl_mask(unsigned mask)
 #endif
 }
 
+static inline void native_load_sp0(struct tss_struct *tss,
+				   struct thread_struct *thread)
+{
+	tss->x86_tss.sp0 = thread->sp0;
+#ifdef CONFIG_X86_32
+	/* Only happens when SEP is enabled, no need to test "SEP"arately */
+	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
+		tss->x86_tss.ss1 = thread->sysenter_cs;
+		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
+	}
+#endif
+}
 
-#ifndef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
 #define __cpuid native_cpuid
 #define paravirt_enabled() 0
 
@@ -206,6 +220,12 @@ static inline void native_set_iopl_mask(unsigned mask)
 #define set_debugreg(value, register)		\
 	native_set_debugreg(register, value)
 
+static inline void load_sp0(struct tss_struct *tss,
+			    struct thread_struct *thread)
+{
+	native_load_sp0(tss, thread);
+}
+
 #define set_iopl_mask native_set_iopl_mask
 #endif /* CONFIG_PARAVIRT */
 
diff --git a/include/asm-x86/processor_32.h b/include/asm-x86/processor_32.h
index 57b345bc3c74..53037d1a6ae6 100644
--- a/include/asm-x86/processor_32.h
+++ b/include/asm-x86/processor_32.h
@@ -278,26 +278,6 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 
 #define KSTK_ESP(task) (task_pt_regs(task)->sp)
 
-static inline void native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
-{
-	tss->x86_tss.sp0 = thread->sp0;
-	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
-	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
-		tss->x86_tss.ss1 = thread->sysenter_cs;
-		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
-	}
-}
-
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else
-
-static inline void load_sp0(struct tss_struct *tss, struct thread_struct *thread)
-{
-	native_load_sp0(tss, thread);
-}
-#endif /* CONFIG_PARAVIRT */
-
 /* generic versions from gas */
 #define GENERIC_NOP1 ".byte 0x90\n"
 #define GENERIC_NOP2 ".byte 0x89,0xf6\n"
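
For context: with load_sp0() routed through the paravirt machinery, a
hypervisor backend can install its own implementation in place of
native_load_sp0(). The sketch below is illustrative only and is not part
of this patch; HYPERVISOR_stack_switch() is the Xen hypercall a backend
like this would use, but the function name and the wiring into the
paravirt ops table are assumptions made for the example.

/*
 * Illustrative sketch, not from this commit: what a paravirt backend's
 * load_sp0 replacement conceptually does.
 */
static void example_pv_load_sp0(struct tss_struct *tss,
				struct thread_struct *thread)
{
	/* Keep the in-memory TSS consistent, as the native version does. */
	tss->x86_tss.sp0 = thread->sp0;

	/* Tell the hypervisor which kernel stack to load on entry. */
	HYPERVISOR_stack_switch(__KERNEL_DS, thread->sp0);
}

The point of the unification is that both 32-bit and 64-bit callers go
through the same load_sp0() hook, so a single backend override covers
both context switch and CPU bring-up paths shown in the diff above.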