From: Ingo Molnar Date: Mon, 25 May 2015 09:27:46 +0000 (+0200) Subject: x86/fpu: Remove error return values from copy_kernel_to_*regs() functions X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=9ccc27a5d297503e485373b69688d038a1d8e662;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git x86/fpu: Remove error return values from copy_kernel_to_*regs() functions None of the copy_kernel_to_*regs() FPU register copying functions are supposed to fail, and all of them have debugging checks that enforce this. Remove their return values and simplify their call sites, which have redundant error checks and error handling code paths. Cc: Andy Lutomirski Cc: Bobby Powers Cc: Borislav Petkov Cc: Dave Hansen Cc: Fenghua Yu Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Oleg Nesterov Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index 6193b7a9cf00..da71d41227ff 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h @@ -141,7 +141,7 @@ static inline int copy_fxregs_to_user(struct fxregs_state __user *fx) return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx)); } -static inline int copy_kernel_to_fxregs(struct fxregs_state *fx) +static inline void copy_kernel_to_fxregs(struct fxregs_state *fx) { int err; @@ -157,8 +157,6 @@ static inline int copy_kernel_to_fxregs(struct fxregs_state *fx) } /* Copying from a kernel buffer to FPU registers should never fail: */ WARN_ON_FPU(err); - - return err; } static inline int copy_user_to_fxregs(struct fxregs_state __user *fx) @@ -173,13 +171,11 @@ static inline int copy_user_to_fxregs(struct fxregs_state __user *fx) "m" (*fx)); } -static inline int copy_kernel_to_fregs(struct fregs_state *fx) +static inline void copy_kernel_to_fregs(struct fregs_state *fx) { int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx)); WARN_ON_FPU(err); - - return err; } static inline int 
copy_user_to_fregs(struct fregs_state __user *fx) @@ -450,20 +446,19 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu) return 0; } -static inline int __copy_kernel_to_fpregs(struct fpu *fpu) +static inline void __copy_kernel_to_fpregs(struct fpu *fpu) { if (use_xsave()) { copy_kernel_to_xregs(&fpu->state.xsave, -1); - return 0; } else { if (use_fxsr()) - return copy_kernel_to_fxregs(&fpu->state.fxsave); + copy_kernel_to_fxregs(&fpu->state.fxsave); else - return copy_kernel_to_fregs(&fpu->state.fsave); + copy_kernel_to_fregs(&fpu->state.fsave); } } -static inline int copy_kernel_to_fpregs(struct fpu *fpu) +static inline void copy_kernel_to_fpregs(struct fpu *fpu) { /* * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is @@ -478,7 +473,7 @@ static inline int copy_kernel_to_fpregs(struct fpu *fpu) : : [addr] "m" (fpu->fpregs_active)); } - return __copy_kernel_to_fpregs(fpu); + __copy_kernel_to_fpregs(fpu); } extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size); @@ -646,12 +641,8 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu) */ static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch) { - if (fpu_switch.preload) { - if (unlikely(copy_kernel_to_fpregs(new_fpu))) { - WARN_ON_FPU(1); - fpu__clear(new_fpu); - } - } + if (fpu_switch.preload) + copy_kernel_to_fpregs(new_fpu); } /* diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index e0e0ee565dc3..8470df44c06d 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -126,12 +126,10 @@ void __kernel_fpu_end(void) { struct fpu *fpu = &current->thread.fpu; - if (fpu->fpregs_active) { - if (WARN_ON_FPU(copy_kernel_to_fpregs(fpu))) - fpu__clear(fpu); - } else { + if (fpu->fpregs_active) + copy_kernel_to_fpregs(fpu); + else __fpregs_deactivate_hw(); - } kernel_fpu_enable(); } @@ -370,14 +368,8 @@ void fpu__restore(struct fpu *fpu) /* Avoid __kernel_fpu_begin() right after fpregs_activate() 
*/ kernel_fpu_disable(); fpregs_activate(fpu); - if (unlikely(copy_kernel_to_fpregs(fpu))) { - /* Copying the kernel state to FPU registers should never fail: */ - WARN_ON_FPU(1); - fpu__clear(fpu); - force_sig_info(SIGSEGV, SEND_SIG_PRIV, current); - } else { - fpu->counter++; - } + copy_kernel_to_fpregs(fpu); + fpu->counter++; kernel_fpu_enable(); } EXPORT_SYMBOL_GPL(fpu__restore);