size_t frame_size,
void __user **fpstate)
{
+ struct fpu *fpu = &current->thread.fpu;
unsigned long sp;
/* Default to using normal stack */
ksig->ka.sa.sa_restorer)
sp = (unsigned long) ksig->ka.sa.sa_restorer;
- if (current->flags & PF_USED_MATH) {
+ if (fpu->fpstate_active) {
unsigned long fx_aligned, math_size;
sp = alloc_mathframe(sp, 1, &fx_aligned, &math_size);
__thread_fpu_end(fpu);
}
- tsk->flags &= ~PF_USED_MATH;
+ fpu->fpstate_active = 0;
preempt_enable();
}
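For orientation between hunks: with the flag living in struct fpu, the dropping helper ends up reading roughly as follows. This is a sketch assembled from the context lines above (the fwait exception-fixup details are elided), not a hunk of the patch itself:

static inline void drop_fpu(struct task_struct *tsk)
{
	struct fpu *fpu = &tsk->thread.fpu;

	preempt_disable();
	tsk->thread.fpu.counter = 0;
	if (__thread_has_fpu(tsk))
		__thread_fpu_end(fpu);	/* deactivate the live FPU registers */
	fpu->fpstate_active = 0;	/* was: tsk->flags &= ~PF_USED_MATH */
	preempt_enable();
}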
* If the task has used the math, pre-load the FPU on xsave processors
* or if the past 5 consecutive context-switches used math.
*/
- fpu.preload = (new->flags & PF_USED_MATH) &&
+ fpu.preload = new_fpu->fpstate_active &&
(use_eager_fpu() || new->thread.fpu.counter > 5);
if (old_fpu->has_fpu) {
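To make the preload heuristic concrete, here is how the decision falls out in a few representative cases (illustrative comment, not part of the patch):

/*
 * fpstate_active == 0                          -> never preload
 * fpstate_active == 1, eager FPU               -> always preload
 * fpstate_active == 1, lazy FPU, counter == 6  -> preload (math-heavy task)
 * fpstate_active == 1, lazy FPU, counter == 3  -> no preload, restore lazily
 */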
* deal with bursty apps that only use the FPU for a short time:
*/
unsigned char counter;
+ /*
+ * This flag indicates whether this context is active: if the task
+ * is not running then we can restore from this context; if the task
+ * is running then we should save into this context.
+ */
+ unsigned char fpstate_active;
};
#endif /* _ASM_X86_FPU_H */
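The save-versus-restore rule described in the new comment can be spelled out in code. A minimal sketch, where the sketch_* helpers are hypothetical stand-ins for the real save/restore primitives:

/* Illustration only: the sketch_* helpers are not kernel functions. */
static void fpstate_sync_sketch(struct fpu *fpu, bool task_is_running)
{
	if (!fpu->fpstate_active)
		return;				/* no valid FPU context to manage */

	if (task_is_running)
		sketch_save_fpregs(fpu);	/* live registers -> fpu->state */
	else
		sketch_restore_fpregs(fpu);	/* fpu->state -> live registers */
}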
unsigned long fs;
#endif
unsigned long gs;
+
+ /* Floating point and extended processor state */
+ struct fpu fpu;
+
/* Save middle states of ptrace breakpoints */
struct perf_event *ptrace_bps[HBP_NUM];
/* Debug status used for traps, single steps, etc... */
unsigned long cr2;
unsigned long trap_nr;
unsigned long error_code;
- /* floating point and extended processor state */
- struct fpu fpu;
#ifdef CONFIG_X86_32
/* Virtual 86 mode info */
struct vm86_struct __user *vm86_info;
int fpu__copy(struct task_struct *dst, struct task_struct *src)
{
+ struct fpu *dst_fpu = &dst->thread.fpu;
+ struct fpu *src_fpu = &src->thread.fpu;
+
dst->thread.fpu.counter = 0;
dst->thread.fpu.has_fpu = 0;
dst->thread.fpu.state = NULL;
task_disable_lazy_fpu_restore(dst);
- if (src->flags & PF_USED_MATH) {
- int err = fpstate_alloc(&dst->thread.fpu);
+ if (src_fpu->fpstate_active) {
+ int err = fpstate_alloc(dst_fpu);
if (err)
return err;
*/
int fpstate_alloc_init(struct task_struct *curr)
{
+ struct fpu *fpu = &curr->thread.fpu;
int ret;
if (WARN_ON_ONCE(curr != current))
return -EINVAL;
- if (WARN_ON_ONCE(curr->flags & PF_USED_MATH))
+ if (WARN_ON_ONCE(fpu->fpstate_active))
return -EINVAL;
/*
fpstate_init(&curr->thread.fpu);
/* Safe to do for the current task: */
- curr->flags |= PF_USED_MATH;
+ fpu->fpstate_active = 1;
return 0;
}
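Call sites follow a common lazy-allocation idiom, visible again in the KVM and math-emu hunks below: allocate and activate the fpstate only on a task's first FPU use. Sketched (the error value varies per call site):

	if (!fpu->fpstate_active && fpstate_alloc_init(current))
		return -ENOMEM;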
*/
static int fpu__unlazy_stopped(struct task_struct *child)
{
+ struct fpu *child_fpu = &child->thread.fpu;
int ret;
if (WARN_ON_ONCE(child == current))
return -EINVAL;
- if (child->flags & PF_USED_MATH) {
+ if (child_fpu->fpstate_active) {
task_disable_lazy_fpu_restore(child);
return 0;
}
fpstate_init(&child->thread.fpu);
/* Safe to do for stopped child tasks: */
- child->flags |= PF_USED_MATH;
+ child_fpu->fpstate_active = 1;
return 0;
}
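Note the deliberate symmetry of the two WARN_ON_ONCE() checks: fpstate_alloc_init() may only run on the current task, while fpu__unlazy_stopped() may only run on a stopped, non-current child. In both cases the target task cannot be using the FPU concurrently, which is what makes setting fpstate_active without further locking safe.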
struct task_struct *tsk = current;
struct fpu *fpu = &tsk->thread.fpu;
- if (!(tsk->flags & PF_USED_MATH)) {
+ if (!fpu->fpstate_active) {
local_irq_enable();
/*
* does a slab alloc which can sleep
void fpu__flush_thread(struct task_struct *tsk)
{
+ struct fpu *fpu = &tsk->thread.fpu;
+
WARN_ON(tsk != current);
if (!use_eager_fpu()) {
drop_fpu(tsk);
fpstate_free(&tsk->thread.fpu);
} else {
- if (!(tsk->flags & PF_USED_MATH)) {
+ if (!fpu->fpstate_active) {
/* kthread execs. TODO: cleanup this horror. */
if (WARN_ON(fpstate_alloc_init(tsk)))
force_sig(SIGKILL, tsk);
*/
int fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
- return (target->flags & PF_USED_MATH) ? regset->n : 0;
+ struct fpu *target_fpu = &target->thread.fpu;
+
+ return target_fpu->fpstate_active ? regset->n : 0;
}
int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
{
- return (cpu_has_fxsr && (target->flags & PF_USED_MATH)) ? regset->n : 0;
+ struct fpu *target_fpu = &target->thread.fpu;
+
+ return (cpu_has_fxsr && target_fpu->fpstate_active) ? regset->n : 0;
}
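The ->active regset hooks size the ptrace/core-dump view of the FPU state; returning 0 when there is no fpstate means the corresponding register set is simply omitted. A sketch of a caller's perspective (hypothetical caller, for illustration):

	int n = fpregs_active(target, regset);

	if (!n)
		return 0;	/* task never used the FPU: expose nothing */
	/* otherwise expose n register slots from this regset */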
int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
* struct user_i387_struct) but is in fact only used for 32-bit
* dumps, so on 64-bit it is really struct user_i387_ia32_struct.
*/
-int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
+int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
{
struct task_struct *tsk = current;
+ struct fpu *fpu = &tsk->thread.fpu;
int fpvalid;
- fpvalid = !!(tsk->flags & PF_USED_MATH);
+ fpvalid = fpu->fpstate_active;
if (fpvalid)
fpvalid = !fpregs_get(tsk, NULL,
0, sizeof(struct user_i387_ia32_struct),
- fpu, NULL);
+ ufpu, NULL);
return fpvalid;
}
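The parameter rename to ufpu (the user-supplied buffer) is what lets the new fpu local exist without shadowing it; the buffer still receives the same user_i387_ia32_struct contents via fpregs_get().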
{
int ia32_fxstate = (buf != buf_fx);
struct task_struct *tsk = current;
+ struct fpu *fpu = &tsk->thread.fpu;
int state_size = xstate_size;
u64 xstate_bv = 0;
int fx_only = 0;
if (!access_ok(VERIFY_READ, buf, size))
return -EACCES;
- if (!(tsk->flags & PF_USED_MATH) && fpstate_alloc_init(tsk))
+ if (!fpu->fpstate_active && fpstate_alloc_init(tsk))
return -1;
if (!static_cpu_has(X86_FEATURE_FPU))
int err = 0;
/*
- * Drop the current fpu which clears PF_USED_MATH. This ensures
+ * Drop the current fpu which clears fpu->fpstate_active. This ensures
* that a context switch during the copy of the new state cannot
* save or restore the intermediate state, which would corrupt
* the newly restored state.
* We will be ready to restore/save the state only after
- * PF_USED_MATH is again set.
+ * fpu->fpstate_active is set again.
*/
drop_fpu(tsk);
sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
}
- tsk->flags |= PF_USED_MATH;
+ fpu->fpstate_active = 1;
if (use_eager_fpu()) {
preempt_disable();
fpu__restore();
*/
void __init_refok eager_fpu_init(void)
{
- WARN_ON(current->flags & PF_USED_MATH);
+ WARN_ON(current->thread.fpu.fpstate_active);
current_thread_info()->status = 0;
if (eagerfpu == ENABLE)
unsigned long sp = regs->sp;
unsigned long buf_fx = 0;
int onsigstack = on_sig_stack(sp);
+ struct fpu *fpu = &current->thread.fpu;
/* redzone */
if (config_enabled(CONFIG_X86_64))
}
}
- if (current->flags & PF_USED_MATH) {
+ if (fpu->fpstate_active) {
sp = alloc_mathframe(sp, config_enabled(CONFIG_X86_32),
&buf_fx, &math_size);
*fpstate = (void __user *)sp;
return (void __user *)-1L;
/* save i387 and extended state */
- if ((current->flags & PF_USED_MATH) &&
+ if (fpu->fpstate_active &&
save_xstate_sig(*fpstate, (void __user *)buf_fx, math_size) < 0)
return (void __user *)-1L;
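Putting the two fpstate_active checks in get_sigframe() together: the first reserves room for the math frame on the signal stack, the second fills it. Roughly (illustrative comment, not from the patch):

/*
 * sp, after alloc_mathframe():  [ xsave/fxsave math frame ]
 * *fpstate and buf_fx point into that reservation, and
 * save_xstate_sig() copies the live state out into it.
 */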
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
bool stepping, failed;
+ struct fpu *fpu = &current->thread.fpu;
/* Are we from a system call? */
if (syscall_get_nr(current, regs) >= 0) {
/*
* Ensure the signal handler starts with the new fpu state.
*/
- if (current->flags & PF_USED_MATH)
+ if (fpu->fpstate_active)
fpu_reset_state(current);
}
signal_setup_done(failed, ksig, stepping);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
+ struct fpu *fpu = &current->thread.fpu;
int r;
sigset_t sigsaved;
- if (!(current->flags & PF_USED_MATH) && fpstate_alloc_init(current))
+ if (!fpu->fpstate_active && fpstate_alloc_init(current))
return -ENOMEM;
if (vcpu->sigset_active)
unsigned long code_base = 0;
unsigned long code_limit = 0; /* Initialized to stop compiler warnings */
struct desc_struct code_descriptor;
+ struct fpu *fpu = &current->thread.fpu;
- if (!(current->flags & PF_USED_MATH)) {
+ if (!fpu->fpstate_active) {
if (fpstate_alloc_init(current)) {
do_group_exit(SIGKILL);
return;
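Taken together, every call site undergoes the same mechanical substitution; the per-function struct fpu locals exist only to keep the converted lines short:

/*
 * tsk->flags & PF_USED_MATH     becomes   fpu->fpstate_active
 * tsk->flags |= PF_USED_MATH    becomes   fpu->fpstate_active = 1;
 * tsk->flags &= ~PF_USED_MATH   becomes   fpu->fpstate_active = 0;
 */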