From af2d94fddcf41e879908b35a8a5308fb94e989c5 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Thu, 23 Apr 2015 17:34:20 +0200
Subject: [PATCH] x86/fpu: Use 'struct fpu' in fpu_reset_state()

Migrate this function to pure 'struct fpu' usage.

Reviewed-by: Borislav Petkov
Cc: Andy Lutomirski
Cc: Dave Hansen
Cc: Fenghua Yu
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Oleg Nesterov
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/fpu-internal.h | 6 ++----
 arch/x86/kernel/fpu/core.c          | 7 +++----
 arch/x86/kernel/fpu/xsave.c         | 4 ++--
 arch/x86/kernel/signal.c            | 2 +-
 4 files changed, 8 insertions(+), 11 deletions(-)

diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 2d7934e4e394..579f7d0a399d 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -380,10 +380,8 @@ static inline void restore_init_xstate(void)
  * Reset the FPU state in the eager case and drop it in the lazy case (later use
  * will reinit it).
  */
-static inline void fpu_reset_state(struct task_struct *tsk)
+static inline void fpu_reset_state(struct fpu *fpu)
 {
-	struct fpu *fpu = &tsk->thread.fpu;
-
 	if (!use_eager_fpu())
 		drop_fpu(fpu);
 	else
@@ -460,7 +458,7 @@ static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
 
 	if (fpu.preload) {
 		if (unlikely(restore_fpu_checking(new_fpu)))
-			fpu_reset_state(new);
+			fpu_reset_state(new_fpu);
 	}
 }
 
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 1ecd25028079..41c92897f574 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -112,12 +112,11 @@ EXPORT_SYMBOL(__kernel_fpu_begin);
 
 void __kernel_fpu_end(void)
 {
-	struct task_struct *me = current;
-	struct fpu *fpu = &me->thread.fpu;
+	struct fpu *fpu = &current->thread.fpu;
 
 	if (fpu->has_fpu) {
 		if (WARN_ON(restore_fpu_checking(fpu)))
-			fpu_reset_state(me);
+			fpu_reset_state(fpu);
 	} else if (!use_eager_fpu()) {
 		stts();
 	}
@@ -371,7 +370,7 @@ void fpu__restore(void)
 	kernel_fpu_disable();
 	__thread_fpu_begin(fpu);
 	if (unlikely(restore_fpu_checking(fpu))) {
-		fpu_reset_state(tsk);
+		fpu_reset_state(fpu);
 		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
 	} else {
 		tsk->thread.fpu.counter++;
diff --git a/arch/x86/kernel/fpu/xsave.c b/arch/x86/kernel/fpu/xsave.c
index 049dc619481d..3953cbf8d7e7 100644
--- a/arch/x86/kernel/fpu/xsave.c
+++ b/arch/x86/kernel/fpu/xsave.c
@@ -343,7 +343,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 			 config_enabled(CONFIG_IA32_EMULATION));
 
 	if (!buf) {
-		fpu_reset_state(tsk);
+		fpu_reset_state(fpu);
 		return 0;
 	}
 
@@ -417,7 +417,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
 		 */
 		user_fpu_begin();
 		if (restore_user_xstate(buf_fx, xstate_bv, fx_only)) {
-			fpu_reset_state(tsk);
+			fpu_reset_state(fpu);
 			return -1;
 		}
 	}
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 20a9d355af59..bcb853e44d30 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -667,7 +667,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 		 * Ensure the signal handler starts with the new fpu state.
 		 */
 		if (fpu->fpstate_active)
-			fpu_reset_state(current);
+			fpu_reset_state(fpu);
 	}
 	signal_setup_done(failed, ksig, stepping);
 }
-- 
2.20.1