struct pt_regs;
struct user_i387_struct;
-extern int fpstate_alloc_init(struct task_struct *curr);
+extern int fpstate_alloc_init(struct fpu *fpu);
extern void fpstate_init(struct fpu *fpu);
extern void fpu__flush_thread(struct task_struct *tsk);
*
* Can fail.
*/
-int fpstate_alloc_init(struct task_struct *curr)
+int fpstate_alloc_init(struct fpu *fpu)
{
- struct fpu *fpu = &curr->thread.fpu;
int ret;
- if (WARN_ON_ONCE(curr != current))
+ if (WARN_ON_ONCE(fpu != &current->thread.fpu))
return -EINVAL;
if (WARN_ON_ONCE(fpu->fpstate_active))
return -EINVAL;
/*
* Memory allocation at the first usage of the FPU and other state.
*/
- ret = fpstate_alloc(&curr->thread.fpu);
+ ret = fpstate_alloc(fpu);
if (ret)
return ret;
- fpstate_init(&curr->thread.fpu);
+ fpstate_init(fpu);
/* Safe to do for the current task: */
fpu->fpstate_active = 1;
/*
* does a slab alloc which can sleep
*/
- if (fpstate_alloc_init(tsk)) {
+ if (fpstate_alloc_init(fpu)) {
/*
* ran out of memory!
*/
} else {
if (!fpu->fpstate_active) {
/* kthread execs. TODO: cleanup this horror. */
- if (WARN_ON(fpstate_alloc_init(tsk)))
+ if (WARN_ON(fpstate_alloc_init(fpu)))
force_sig(SIGKILL, tsk);
user_fpu_begin();
}
if (!access_ok(VERIFY_READ, buf, size))
return -EACCES;
- if (!fpu->fpstate_active && fpstate_alloc_init(tsk))
+ if (!fpu->fpstate_active && fpstate_alloc_init(fpu))
return -1;
if (!static_cpu_has(X86_FEATURE_FPU))
struct fpu *fpu = &current->thread.fpu;
if (!fpu->fpstate_active) {
- if (fpstate_alloc_init(current)) {
+ if (fpstate_alloc_init(fpu)) {
do_group_exit(SIGKILL);
return;
}
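Taken together, the call-site conversions above all follow one pattern: the
caller resolves the task's 'struct fpu' once and hands that to
fpstate_alloc_init() directly, instead of passing the whole task_struct.
A minimal sketch of the new convention (fpu__example_activate() is a
hypothetical caller for illustration, not part of this patch):

static int fpu__example_activate(void)
{
	/* The new API takes the FPU context, not the task: */
	struct fpu *fpu = &current->thread.fpu;

	/* Nothing to do if the fpstate was already allocated: */
	if (fpu->fpstate_active)
		return 0;

	/* Can fail; the call sites above SIGKILL or return an error: */
	return fpstate_alloc_init(fpu);
}

Note that fpstate_alloc_init() still WARNs unless it is passed the current
task's FPU context, so callers resolve it from 'current' as shown here.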