From 4540d3faa7c3fca6a6125448861de0e2e485658b Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Thu, 23 Apr 2015 12:31:17 +0200
Subject: [PATCH] x86/fpu: Remove 'struct task_struct' usage from __thread_fpu_begin()

Migrate this function to pure 'struct fpu' usage.

Reviewed-by: Borislav Petkov
Cc: Andy Lutomirski
Cc: Dave Hansen
Cc: Fenghua Yu
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Oleg Nesterov
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/fpu-internal.h | 10 ++++++----
 arch/x86/kernel/fpu/core.c          |  3 ++-
 2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index b1803a656651..44516ad6c890 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -351,11 +351,11 @@ static inline void __thread_fpu_end(struct fpu *fpu)
 		stts();
 }
 
-static inline void __thread_fpu_begin(struct task_struct *tsk)
+static inline void __thread_fpu_begin(struct fpu *fpu)
 {
 	if (!use_eager_fpu())
 		clts();
-	__thread_set_has_fpu(&tsk->thread.fpu);
+	__thread_set_has_fpu(fpu);
 }
 
 static inline void drop_fpu(struct task_struct *tsk)
@@ -451,7 +451,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 				fpu.preload = 0;
 			else
 				prefetch(new->thread.fpu.state);
-			__thread_fpu_begin(new);
+			__thread_fpu_begin(new_fpu);
 		}
 	}
 	return fpu;
@@ -505,9 +505,11 @@ static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
  */
 static inline void user_fpu_begin(void)
 {
+	struct fpu *fpu = &current->thread.fpu;
+
 	preempt_disable();
 	if (!user_has_fpu())
-		__thread_fpu_begin(current);
+		__thread_fpu_begin(fpu);
 	preempt_enable();
 }
 
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 4e1f8f1bf493..cf49cd574d32 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -329,6 +329,7 @@ static int fpu__unlazy_stopped(struct task_struct *child)
 void fpu__restore(void)
 {
 	struct task_struct *tsk = current;
+	struct fpu *fpu = &tsk->thread.fpu;
 
 	if (!tsk_used_math(tsk)) {
 		local_irq_enable();
@@ -347,7 +348,7 @@ void fpu__restore(void)
 
 	/* Avoid __kernel_fpu_begin() right after __thread_fpu_begin() */
 	kernel_fpu_disable();
-	__thread_fpu_begin(tsk);
+	__thread_fpu_begin(fpu);
 	if (unlikely(restore_fpu_checking(tsk))) {
 		fpu_reset_state(tsk);
 		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
-- 
2.20.1
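
Note (not part of the patch): the conversion above narrows a helper's parameter from 'struct task_struct *' to the embedded 'struct fpu *' it actually manipulates, so call sites hoist the pointer once and pass it directly. Below is a minimal standalone sketch of that pattern; the types and function names are hypothetical stand-ins that only mirror the kernel's task_struct -> thread_struct -> fpu nesting, not the real definitions.

#include <stdio.h>

/*
 * Hypothetical stand-ins for illustration only -- they mirror the
 * nesting used in the patch, not the actual kernel types.
 */
struct fpu {
	int has_fpu;			/* does this context own the FPU right now? */
};

struct thread_struct {
	struct fpu fpu;			/* embedded FPU context */
};

struct task_struct {
	struct thread_struct thread;
};

/* Before: the helper takes the whole task but only touches tsk->thread.fpu. */
static void thread_fpu_begin_old(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 1;
}

/*
 * After: the parameter is narrowed to the state the helper actually uses,
 * which is the shape __thread_fpu_begin() has after this patch.
 */
static void thread_fpu_begin_new(struct fpu *fpu)
{
	fpu->has_fpu = 1;
}

int main(void)
{
	struct task_struct task = { { { 0 } } };
	struct fpu *fpu = &task.thread.fpu;	/* call sites hoist the pointer once */

	thread_fpu_begin_old(&task);		/* old calling convention */
	thread_fpu_begin_new(fpu);		/* new calling convention */

	printf("has_fpu = %d\n", task.thread.fpu.has_fpu);
	return 0;
}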