From: Ingo Molnar
Date: Wed, 29 Apr 2015 18:10:43 +0000 (+0200)
Subject: x86/fpu: Rename restore_fpu_checking() to copy_fpstate_to_fpregs()
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=0e75c54f1703e83e6cdf239491bf7294f6c34777;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

x86/fpu: Rename restore_fpu_checking() to copy_fpstate_to_fpregs()

fpu_restore_checking() is a helper function of restore_fpu_checking(),
but this is not apparent from the naming.

Both copy fpstate contents to fpregs, while the fuller variant does a
full copy without leaking information.

So rename them to:

    copy_fpstate_to_fpregs()
  __copy_fpstate_to_fpregs()

Cc: Andy Lutomirski
Cc: Borislav Petkov
Cc: Dave Hansen
Cc: Fenghua Yu
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Oleg Nesterov
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Signed-off-by: Ingo Molnar
---

diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 31bfda818f30..c09aea145e09 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -289,7 +289,7 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
 
 extern void fpu__save(struct fpu *fpu);
 
-static inline int fpu_restore_checking(struct fpu *fpu)
+static inline int __copy_fpstate_to_fpregs(struct fpu *fpu)
 {
 	if (use_xsave())
 		return fpu_xrstor_checking(&fpu->state.xsave);
@@ -299,7 +299,7 @@ static inline int fpu_restore_checking(struct fpu *fpu)
 		return frstor_checking(&fpu->state.fsave);
 }
 
-static inline int restore_fpu_checking(struct fpu *fpu)
+static inline int copy_fpstate_to_fpregs(struct fpu *fpu)
 {
 	/*
 	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
@@ -314,7 +314,7 @@ static inline int restore_fpu_checking(struct fpu *fpu)
 			: : [addr] "m" (fpu->fpregs_active));
 	}
 
-	return fpu_restore_checking(fpu);
+	return __copy_fpstate_to_fpregs(fpu);
 }
 
 /*
@@ -520,7 +520,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
 static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
 {
 	if (fpu_switch.preload) {
-		if (unlikely(restore_fpu_checking(new_fpu)))
+		if (unlikely(copy_fpstate_to_fpregs(new_fpu)))
 			fpu__reset(new_fpu);
 	}
 }
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index bf217cde114d..14d8e33d9fe0 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -115,7 +115,7 @@ void __kernel_fpu_end(void)
 	struct fpu *fpu = &current->thread.fpu;
 
 	if (fpu->fpregs_active) {
-		if (WARN_ON(restore_fpu_checking(fpu)))
+		if (WARN_ON(copy_fpstate_to_fpregs(fpu)))
 			fpu__reset(fpu);
 	} else {
 		__fpregs_deactivate_hw();
 	}
@@ -338,7 +338,7 @@ void fpu__restore(void)
 	/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
 	kernel_fpu_disable();
 	fpregs_activate(fpu);
-	if (unlikely(restore_fpu_checking(fpu))) {
+	if (unlikely(copy_fpstate_to_fpregs(fpu))) {
 		fpu__reset(fpu);
 		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
 	} else {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5c61aae277f9..f4438179398b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7030,7 +7030,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 	kvm_put_guest_xcr0(vcpu);
 	vcpu->guest_fpu_loaded = 1;
 	__kernel_fpu_begin();
-	fpu_restore_checking(&vcpu->arch.guest_fpu);
+	__copy_fpstate_to_fpregs(&vcpu->arch.guest_fpu);
 	trace_kvm_fpu(1);
 }
 
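
Illustrative sketch (not part of the commit): a minimal user-space model of the layering this rename makes explicit, assuming nothing beyond what the diff above shows. Here __copy_fpstate_to_fpregs() stands for the raw fpstate-to-fpregs copy and copy_fpstate_to_fpregs() for the fuller variant that first neutralizes state the raw copy could leak, in the spirit of the AMD FDP/FIP/FOP workaround in internal.h. The type "struct fpu_model" and its fields are hypothetical stand-ins, not the kernel's struct fpu.

/*
 * Simplified model only -- NOT kernel code. The kernel-style names of the
 * two helpers are kept; everything else is a stand-in for illustration.
 */
#include <stdio.h>
#include <string.h>

struct fpu_model {
	double fpregs[8];	/* stand-in for the hardware FPU registers    */
	double fpstate[8];	/* stand-in for the in-memory fpstate image   */
	int    stale_exc_info;	/* stand-in for leftover FDP/FIP/FOP contents */
};

/* Raw copy: in-memory fpstate image -> "registers"; returns 0 on success. */
static int __copy_fpstate_to_fpregs(struct fpu_model *fpu)
{
	memcpy(fpu->fpregs, fpu->fpstate, sizeof(fpu->fpregs));
	return 0;
}

/* Fuller variant: clear what the raw copy would leak, then do the raw copy. */
static int copy_fpstate_to_fpregs(struct fpu_model *fpu)
{
	fpu->stale_exc_info = 0;
	return __copy_fpstate_to_fpregs(fpu);
}

int main(void)
{
	struct fpu_model fpu = { .fpstate = { 3.14 }, .stale_exc_info = 1 };

	/* Mirrors the switch_fpu_finish()/fpu__restore() callers in the diff:
	 * restore the register state, fall back to a reset path on failure. */
	if (copy_fpstate_to_fpregs(&fpu))
		puts("restore failed, would reinitialize FPU state");
	else
		printf("restored fpregs[0]=%g, stale exception info cleared: %s\n",
		       fpu.fpregs[0], fpu.stale_exc_info ? "no" : "yes");
	return 0;
}

The naming split matches how the diff uses the pair: the double-underscore helper is the internal building block (called directly only from KVM's kvm_load_guest_fpu()), while ordinary callers such as switch_fpu_finish(), __kernel_fpu_end() and fpu__restore() go through the checked copy_fpstate_to_fpregs() wrapper.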