extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);
-extern void fpstate_init_curr(struct fpu *fpu);
+extern void fpu__activate_curr(struct fpu *fpu);
extern void fpstate_init(struct fpu *fpu);
extern void fpu__clear(struct task_struct *tsk);
}
/*
- * Initialize the current task's in-memory FPU context:
+ * Activate the current task's in-memory FPU context,
+ * if it has not been used before:
*/
-void fpstate_init_curr(struct fpu *fpu)
+void fpu__activate_curr(struct fpu *fpu)
{
WARN_ON_ONCE(fpu != &current->thread.fpu);
- WARN_ON_ONCE(fpu->fpstate_active);
- fpstate_init(fpu);
+ if (!fpu->fpstate_active) {
+ fpstate_init(fpu);
- /* Safe to do for the current task: */
- fpu->fpstate_active = 1;
+ /* Safe to do for the current task: */
+ fpu->fpstate_active = 1;
+ }
}
-EXPORT_SYMBOL_GPL(fpstate_init_curr);
+EXPORT_SYMBOL_GPL(fpu__activate_curr);
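(The point of the change above is that fpu__activate_curr() is now idempotent: it checks fpu->fpstate_active itself, so callers can invoke it unconditionally instead of guarding the call. A minimal userspace sketch of that pattern follows; the fpu_ctx struct and fpu_ctx_activate() names are illustrative stand-ins, not kernel API.)

	#include <stdio.h>

	/* Hypothetical stand-in for struct fpu: only the lazily-set flag matters here. */
	struct fpu_ctx {
		int fpstate_active;
		int state;			/* stands in for the register image */
	};

	/* Idempotent activation: initialize only on first use, so callers
	 * may invoke this unconditionally, any number of times. */
	static void fpu_ctx_activate(struct fpu_ctx *fpu)
	{
		if (!fpu->fpstate_active) {
			fpu->state = 0;		/* fpstate_init() equivalent */
			fpu->fpstate_active = 1;
		}
	}

	int main(void)
	{
		struct fpu_ctx fpu = { 0 };

		/* Old call-site pattern needed the guard at every caller:
		 *	if (!fpu.fpstate_active)
		 *		fpu_ctx_activate(&fpu);
		 * With the guard folded into the helper, the plain call is safe: */
		fpu_ctx_activate(&fpu);
		fpu_ctx_activate(&fpu);		/* second call is a no-op */

		printf("active=%d\n", fpu.fpstate_active);
		return 0;
	}

(This mirrors the call-site simplification in the hunks below, where the "if (!fpu->fpstate_active)" checks are dropped in favor of a single fpu__activate_curr() call.)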
/*
* This function is called before we modify a stopped child's
struct task_struct *tsk = current;
struct fpu *fpu = &tsk->thread.fpu;
- if (!fpu->fpstate_active)
- fpstate_init_curr(fpu);
+ fpu__activate_curr(fpu);
/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
kernel_fpu_disable();
drop_fpu(fpu);
} else {
if (!fpu->fpstate_active) {
- fpstate_init_curr(fpu);
+ fpu__activate_curr(fpu);
user_fpu_begin();
}
restore_init_xstate();
if (!access_ok(VERIFY_READ, buf, size))
return -EACCES;
- if (!fpu->fpstate_active)
- fpstate_init_curr(fpu);
+ fpu__activate_curr(fpu);
if (!static_cpu_has(X86_FEATURE_FPU))
return fpregs_soft_set(current, NULL,
int r;
sigset_t sigsaved;
- if (!fpu->fpstate_active)
- fpstate_init_curr(fpu);
+ fpu__activate_curr(fpu);
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
struct desc_struct code_descriptor;
struct fpu *fpu = &current->thread.fpu;
- if (!fpu->fpstate_active)
- fpstate_init_curr(fpu);
+ fpu__activate_curr(fpu);
#ifdef RE_ENTRANT_CHECKING
if (emulating) {