struct pt_regs;
struct user_i387_struct;
+extern int fpstate_alloc_init(struct task_struct *curr);
+
extern int init_fpu(struct task_struct *child);
+
extern void fpu_finit(struct fpu *fpu);
extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
extern void math_state_restore(void);
}
EXPORT_SYMBOL_GPL(fpu_finit);
+/*
+ * Allocate the backing store for the current task's FPU registers
+ * and initialize the registers themselves as well.
+ *
+ * Can fail: returns 0 on success, -EINVAL on misuse (called for a
+ * task other than 'current', or for a task that already has FPU
+ * state), or the error code from fpu_alloc() on allocation failure
+ * (presumably -ENOMEM — callers treat it as such; verify against
+ * fpu_alloc()).
+ */
+int fpstate_alloc_init(struct task_struct *curr)
+{
+ int ret;
+
+ /* Only the task itself may set up its own FPU state: */
+ if (WARN_ON_ONCE(curr != current))
+ return -EINVAL;
+ /* Already initialized — allocating again would leak the old state: */
+ if (WARN_ON_ONCE(curr->flags & PF_USED_MATH))
+ return -EINVAL;
+
+ /*
+ * Memory allocation at the first usage of the FPU and other state.
+ */
+ ret = fpu_alloc(&curr->thread.fpu);
+ if (ret)
+ return ret;
+
+ /* Initialize the freshly allocated register state to defaults: */
+ fpu_finit(&curr->thread.fpu);
+
+ /* Safe to do for the current task: */
+ curr->flags |= PF_USED_MATH;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fpstate_alloc_init);
+
/*
* The _current_ task is using the FPU for the first time
* so initialize it and set the mxcsr to its default
} else {
if (!tsk_used_math(tsk)) {
/* kthread execs. TODO: cleanup this horror. */
- if (WARN_ON(init_fpu(tsk)))
+ if (WARN_ON(fpstate_alloc_init(tsk)))
force_sig(SIGKILL, tsk);
user_fpu_begin();
}
/*
* does a slab alloc which can sleep
*/
- if (init_fpu(tsk)) {
+ if (fpstate_alloc_init(tsk)) {
/*
* ran out of memory!
*/
if (!access_ok(VERIFY_READ, buf, size))
return -EACCES;
- if (!used_math() && init_fpu(tsk))
+ if (!used_math() && fpstate_alloc_init(tsk))
return -1;
if (!static_cpu_has(X86_FEATURE_FPU))
int r;
sigset_t sigsaved;
- if (!tsk_used_math(current) && init_fpu(current))
+ if (!tsk_used_math(current) && fpstate_alloc_init(current))
return -ENOMEM;
if (vcpu->sigset_active)
struct desc_struct code_descriptor;
if (!used_math()) {
- if (init_fpu(current)) {
+ if (fpstate_alloc_init(current)) {
do_group_exit(SIGKILL);
return;
}