x86/fpu: Remove task_disable_lazy_fpu_restore()
author: Ingo Molnar <mingo@kernel.org>
Thu, 23 Apr 2015 15:08:41 +0000 (17:08 +0200)
committer: Ingo Molnar <mingo@kernel.org>
Tue, 19 May 2015 13:47:26 +0000 (15:47 +0200)
Replace task_disable_lazy_fpu_restore() with easier to read
open-coded uses: we already update the fpu->last_cpu field
explicitly in other cases.

(This also removes yet another task_struct-using FPU method.)

Better explain the fpu::last_cpu field in the structure definition.

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/fpu-internal.h
arch/x86/include/asm/fpu/types.h
arch/x86/kernel/fpu/core.c

index e8f7134f0ffbd4adb73d98f337bd68e48308572a..76a1f35298813ffee8271866e8ff54a3024450ee 100644 (file)
@@ -74,16 +74,6 @@ static inline void __cpu_disable_lazy_restore(unsigned int cpu)
        per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
 }
 
-/*
- * Used to indicate that the FPU state in memory is newer than the FPU
- * state in registers, and the FPU state should be reloaded next time the
- * task is run. Only safe on the current task, or non-running tasks.
- */
-static inline void task_disable_lazy_fpu_restore(struct task_struct *tsk)
-{
-       tsk->thread.fpu.last_cpu = ~0;
-}
-
 static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
 {
        return &new->thread.fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) &&
@@ -430,7 +420,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 
        if (old_fpu->has_fpu) {
                if (!fpu_save_init(&old->thread.fpu))
-                       task_disable_lazy_fpu_restore(old);
+                       old->thread.fpu.last_cpu = -1;
                else
                        old->thread.fpu.last_cpu = cpu;
 
@@ -446,7 +436,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
                        stts();
        } else {
                old->thread.fpu.counter = 0;
-               task_disable_lazy_fpu_restore(old);
+               old->thread.fpu.last_cpu = -1;
                if (fpu.preload) {
                        new->thread.fpu.counter++;
                        if (fpu_lazy_restore(new, cpu))
index f6317d9aa808016bb9ef420630247cad6f676300..cad1c37d9ea2b006b7a2a642d4c62129bbe3b6e4 100644 (file)
@@ -125,7 +125,18 @@ union thread_xstate {
 };
 
 struct fpu {
+       /*
+        * Records the last CPU on which this context was loaded into
+        * FPU registers. (In the lazy-switching case we might be
+        * able to reuse FPU registers across multiple context switches
+        * this way, if no intermediate task used the FPU.)
+        *
+        * A value of -1 is used to indicate that the FPU state in context
+        * memory is newer than the FPU state in registers, and that the
+        * FPU state should be reloaded next time the task is run.
+        */
        unsigned int                    last_cpu;
+
        unsigned int                    has_fpu;
        union thread_xstate             *state;
        /*
index ba539fc018d749f3f9127ca911461643e8a182ce..230e93783c9997dad7e0e3713ba3288aa9ebf5a4 100644 (file)
@@ -242,8 +242,7 @@ int fpu__copy(struct task_struct *dst, struct task_struct *src)
        dst->thread.fpu.counter = 0;
        dst->thread.fpu.has_fpu = 0;
        dst->thread.fpu.state = NULL;
-
-       task_disable_lazy_fpu_restore(dst);
+       dst->thread.fpu.last_cpu = -1;
 
        if (src_fpu->fpstate_active) {
                int err = fpstate_alloc(dst_fpu);
@@ -319,7 +318,7 @@ static int fpu__unlazy_stopped(struct task_struct *child)
                return -EINVAL;
 
        if (child_fpu->fpstate_active) {
-               task_disable_lazy_fpu_restore(child);
+               child->thread.fpu.last_cpu = -1;
                return 0;
        }