struct task_struct;
extern void save_fpu(struct task_struct *__tsk, struct pt_regs *regs);
+void fpu_state_restore(struct pt_regs *regs);
#else
#define release_fpu(regs) do { } while (0)
static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
{
	preempt_disable();
if (test_tsk_thread_flag(tsk, TIF_USEDFPU))
save_fpu(tsk, regs);
+ else
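+		/* No FPU use this timeslice: reset the eager-restore counter. */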
+ tsk->fpu_counter = 0;
preempt_enable();
}
force_sig(SIGFPE, tsk);
}
-BUILD_TRAP_HANDLER(fpu_state_restore)
+void fpu_state_restore(struct pt_regs *regs)
{
struct task_struct *tsk = current;
- TRAP_HANDLER_DECL;
grab_fpu(regs);
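+	/* Re-enable FPU access for this context so its state can be restored. */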
- if (!user_mode(regs)) {
+ if (unlikely(!user_mode(regs))) {
printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
+ BUG();
return;
}
- if (used_math()) {
+ if (likely(used_math())) {
/* Using the FPU again. */
restore_fpu(tsk);
} else {
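+		/* First-time FPU user: just mark the task; there is no saved state to restore. */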
set_used_math();
}
set_tsk_thread_flag(tsk, TIF_USEDFPU);
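+	/* Another FPU-using timeslice; __switch_to() restores eagerly once this exceeds 5. */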
+ tsk->fpu_counter++;
+}
+
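+/*
+ * The trap entry point is now a thin wrapper, so __switch_to() can
+ * invoke fpu_state_restore() directly without the trap machinery.
+ */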
+BUILD_TRAP_HANDLER(fpu_state_restore)
+{
+ TRAP_HANDLER_DECL;
+
+ fpu_state_restore(regs);
}
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev, struct task_struct *next)
{
+ struct thread_struct *next_t = &next->thread;
+
#if defined(CONFIG_SH_FPU)
unlazy_fpu(prev, task_pt_regs(prev));
+
+ /* we're going to use this soon, after a few expensive things */
+ if (next->fpu_counter > 5)
+ prefetch(&next_t->fpu.hard);
#endif
#ifdef CONFIG_MMU
	/* ... MMU bank register switching, unchanged by this patch ... */
#endif
+#if defined(CONFIG_SH_FPU)
+	/* If the task has used the FPU during the last 5 timeslices, do a
+	 * full restore of the math state immediately to avoid the trap;
+	 * the chances of needing the FPU again soon are obviously high.
+	 */
+ if (next->fpu_counter > 5) {
+ fpu_state_restore(task_pt_regs(next));
+ }
+#endif
+
return prev;
}