x86/fpu: Rename fpu::fpstate_active to fpu::initialized
authorIngo Molnar <mingo@kernel.org>
Tue, 26 Sep 2017 07:43:36 +0000 (09:43 +0200)
committerIngo Molnar <mingo@kernel.org>
Tue, 26 Sep 2017 07:43:36 +0000 (09:43 +0200)
The x86 FPU code used to have a complex state machine where both the FPU
registers and the FPU state context could be 'active' (or inactive)
independently of each other - which enabled features like lazy FPU restore.

Much of this complexity is gone in the current code: now we basically can
have FPU-less tasks (kernel threads) that don't use (and save/restore) FPU
state at all, plus full FPU users that save/restore directly with no laziness
whatsoever.

But the name fpu::fpstate_active still carries bits of the old complexity -
meanwhile this flag has become a simple flag that shows whether the FPU
context saving area in the thread struct is initialized and used, or not.

Rename it to fpu::initialized to express this simplicity in the name as well.

Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Eric Biggers <ebiggers3@gmail.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Yu-cheng Yu <yu-cheng.yu@intel.com>
Link: http://lkml.kernel.org/r/20170923130016.21448-30-mingo@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/ia32/ia32_signal.c
arch/x86/include/asm/fpu/internal.h
arch/x86/include/asm/fpu/types.h
arch/x86/include/asm/trace/fpu.h
arch/x86/kernel/fpu/core.c
arch/x86/kernel/fpu/init.c
arch/x86/kernel/fpu/regset.c
arch/x86/kernel/fpu/signal.c
arch/x86/kernel/fpu/xstate.c
arch/x86/kernel/signal.c
arch/x86/mm/pkeys.c

index e0bb46c0285752e73c8eb55420e29f97d88a818e..0e2a5edbce00111f6a41f1b3070610b5316d1e71 100644 (file)
@@ -231,7 +231,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
                 ksig->ka.sa.sa_restorer)
                sp = (unsigned long) ksig->ka.sa.sa_restorer;
 
-       if (fpu->fpstate_active) {
+       if (fpu->initialized) {
                unsigned long fx_aligned, math_size;
 
                sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size);
index 508e4181c4af16996f1cc749457d88d27a56337e..b26ae05da18a7972b03ba19d9bf20a9c809e50c5 100644 (file)
@@ -527,7 +527,7 @@ static inline void fpregs_activate(struct fpu *fpu)
 static inline void
 switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 {
-       if (old_fpu->fpstate_active) {
+       if (old_fpu->initialized) {
                if (!copy_fpregs_to_fpstate(old_fpu))
                        old_fpu->last_cpu = -1;
                else
@@ -550,7 +550,7 @@ switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
 {
        bool preload = static_cpu_has(X86_FEATURE_FPU) &&
-                      new_fpu->fpstate_active;
+                      new_fpu->initialized;
 
        if (preload) {
                if (!fpregs_state_valid(new_fpu, cpu))
index 71db45ca88708a5115e0773645d380fb3c20853e..a1520575d86b81fd2521d7e4013856b7e70d4db2 100644 (file)
@@ -293,13 +293,13 @@ struct fpu {
        unsigned int                    last_cpu;
 
        /*
-        * @fpstate_active:
+        * @initialized:
         *
-        * This flag indicates whether this context is active: if the task
+        * This flag indicates whether this context is initialized: if the task
         * is not running then we can restore from this context, if the task
         * is running then we should save into this context.
         */
-       unsigned char                   fpstate_active;
+       unsigned char                   initialized;
 
        /*
         * @state:
index da565aae9fd2521043069ea96a07a62736e7d5b4..39f7a27bef130fbb6c83a881951e440091ed9168 100644 (file)
@@ -12,22 +12,22 @@ DECLARE_EVENT_CLASS(x86_fpu,
 
        TP_STRUCT__entry(
                __field(struct fpu *, fpu)
-               __field(bool, fpstate_active)
+               __field(bool, initialized)
                __field(u64, xfeatures)
                __field(u64, xcomp_bv)
                ),
 
        TP_fast_assign(
                __entry->fpu            = fpu;
-               __entry->fpstate_active = fpu->fpstate_active;
+               __entry->initialized    = fpu->initialized;
                if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
                        __entry->xfeatures = fpu->state.xsave.header.xfeatures;
                        __entry->xcomp_bv  = fpu->state.xsave.header.xcomp_bv;
                }
        ),
-       TP_printk("x86/fpu: %p fpstate_active: %d xfeatures: %llx xcomp_bv: %llx",
+       TP_printk("x86/fpu: %p initialized: %d xfeatures: %llx xcomp_bv: %llx",
                        __entry->fpu,
-                       __entry->fpstate_active,
+                       __entry->initialized,
                        __entry->xfeatures,
                        __entry->xcomp_bv
        )
index b2cdeb3b18609538102cf52f83a200ece5ab284b..c8d6032f04d09a725cbec07dbe88a8841e1de76c 100644 (file)
@@ -100,7 +100,7 @@ void __kernel_fpu_begin(void)
 
        kernel_fpu_disable();
 
-       if (fpu->fpstate_active) {
+       if (fpu->initialized) {
                /*
                 * Ignore return value -- we don't care if reg state
                 * is clobbered.
@@ -116,7 +116,7 @@ void __kernel_fpu_end(void)
 {
        struct fpu *fpu = &current->thread.fpu;
 
-       if (fpu->fpstate_active)
+       if (fpu->initialized)
                copy_kernel_to_fpregs(&fpu->state);
 
        kernel_fpu_enable();
@@ -148,7 +148,7 @@ void fpu__save(struct fpu *fpu)
 
        preempt_disable();
        trace_x86_fpu_before_save(fpu);
-       if (fpu->fpstate_active) {
+       if (fpu->initialized) {
                if (!copy_fpregs_to_fpstate(fpu)) {
                        copy_kernel_to_fpregs(&fpu->state);
                }
@@ -191,7 +191,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 {
        dst_fpu->last_cpu = -1;
 
-       if (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU))
+       if (!src_fpu->initialized || !static_cpu_has(X86_FEATURE_FPU))
                return 0;
 
        WARN_ON_FPU(src_fpu != &current->thread.fpu);
@@ -240,13 +240,13 @@ void fpu__activate_curr(struct fpu *fpu)
 {
        WARN_ON_FPU(fpu != &current->thread.fpu);
 
-       if (!fpu->fpstate_active) {
+       if (!fpu->initialized) {
                fpstate_init(&fpu->state);
                trace_x86_fpu_init_state(fpu);
 
                trace_x86_fpu_activate_state(fpu);
                /* Safe to do for the current task: */
-               fpu->fpstate_active = 1;
+               fpu->initialized = 1;
        }
 }
 EXPORT_SYMBOL_GPL(fpu__activate_curr);
@@ -271,13 +271,13 @@ void fpu__activate_fpstate_read(struct fpu *fpu)
        if (fpu == &current->thread.fpu) {
                fpu__save(fpu);
        } else {
-               if (!fpu->fpstate_active) {
+               if (!fpu->initialized) {
                        fpstate_init(&fpu->state);
                        trace_x86_fpu_init_state(fpu);
 
                        trace_x86_fpu_activate_state(fpu);
                        /* Safe to do for current and for stopped child tasks: */
-                       fpu->fpstate_active = 1;
+                       fpu->initialized = 1;
                }
        }
 }
@@ -303,7 +303,7 @@ void fpu__activate_fpstate_write(struct fpu *fpu)
         */
        WARN_ON_FPU(fpu == &current->thread.fpu);
 
-       if (fpu->fpstate_active) {
+       if (fpu->initialized) {
                /* Invalidate any lazy state: */
                __fpu_invalidate_fpregs_state(fpu);
        } else {
@@ -312,7 +312,7 @@ void fpu__activate_fpstate_write(struct fpu *fpu)
 
                trace_x86_fpu_activate_state(fpu);
                /* Safe to do for stopped child tasks: */
-               fpu->fpstate_active = 1;
+               fpu->initialized = 1;
        }
 }
 
@@ -354,7 +354,7 @@ void fpu__drop(struct fpu *fpu)
        preempt_disable();
 
        if (fpu == &current->thread.fpu) {
-               if (fpu->fpstate_active) {
+               if (fpu->initialized) {
                        /* Ignore delayed exceptions from user space */
                        asm volatile("1: fwait\n"
                                     "2:\n"
@@ -363,7 +363,7 @@ void fpu__drop(struct fpu *fpu)
                }
        }
 
-       fpu->fpstate_active = 0;
+       fpu->initialized = 0;
 
        trace_x86_fpu_dropped(fpu);
 
index d5d44c452624c88e3abb5c75e68d00b55fee6019..7affb7e3d9a5b94326b51528119787f4f956640b 100644 (file)
@@ -240,7 +240,7 @@ static void __init fpu__init_system_ctx_switch(void)
        WARN_ON_FPU(!on_boot_cpu);
        on_boot_cpu = 0;
 
-       WARN_ON_FPU(current->thread.fpu.fpstate_active);
+       WARN_ON_FPU(current->thread.fpu.initialized);
 }
 
 /*
index c764f74053223def07d73504dae4bd5426ec13ee..19e82334e8115f0827978b6cfa19bdd5be71f377 100644 (file)
@@ -16,14 +16,14 @@ int regset_fpregs_active(struct task_struct *target, const struct user_regset *r
 {
        struct fpu *target_fpu = &target->thread.fpu;
 
-       return target_fpu->fpstate_active ? regset->n : 0;
+       return target_fpu->initialized ? regset->n : 0;
 }
 
 int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
 {
        struct fpu *target_fpu = &target->thread.fpu;
 
-       if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->fpstate_active)
+       if (boot_cpu_has(X86_FEATURE_FXSR) && target_fpu->initialized)
                return regset->n;
        else
                return 0;
@@ -380,7 +380,7 @@ int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
        struct fpu *fpu = &tsk->thread.fpu;
        int fpvalid;
 
-       fpvalid = fpu->fpstate_active;
+       fpvalid = fpu->initialized;
        if (fpvalid)
                fpvalid = !fpregs_get(tsk, NULL,
                                      0, sizeof(struct user_i387_ia32_struct),
index da68ea1c3a44204d452b4b41a9477de93b894aab..ab2dd24cfea48068ea2236bf275b019a65cb29d8 100644 (file)
@@ -171,7 +171,7 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
                        sizeof(struct user_i387_ia32_struct), NULL,
                        (struct _fpstate_32 __user *) buf) ? -1 : 1;
 
-       if (fpu->fpstate_active || using_compacted_format()) {
+       if (fpu->initialized || using_compacted_format()) {
                /* Save the live register state to the user directly. */
                if (copy_fpregs_to_sigframe(buf_fx))
                        return -1;
@@ -315,12 +315,12 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
                int err = 0;
 
                /*
-                * Drop the current fpu which clears fpu->fpstate_active. This ensures
+                * Drop the current fpu which clears fpu->initialized. This ensures
                 * that any context-switch during the copy of the new state,
                 * avoids the intermediate state from getting restored/saved.
                 * Thus avoiding the new restored state from getting corrupted.
                 * We will be ready to restore/save the state only after
-                * fpu->fpstate_active is again set.
+                * fpu->initialized is again set.
                 */
                fpu__drop(fpu);
 
@@ -342,7 +342,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
                        sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
                }
 
-               fpu->fpstate_active = 1;
+               fpu->initialized = 1;
                preempt_disable();
                fpu__restore(fpu);
                preempt_enable();
index fda1109cc3553564e11040b1fc99b417e3a513d4..703e76d027ee531351167f6b8b02b2ccb6fa409b 100644 (file)
@@ -867,7 +867,7 @@ const void *get_xsave_field_ptr(int xsave_state)
 {
        struct fpu *fpu = &current->thread.fpu;
 
-       if (!fpu->fpstate_active)
+       if (!fpu->initialized)
                return NULL;
        /*
         * fpu__save() takes the CPU's xstate registers
index e04442345fc0977cf73f2573f77b3df71310f0a8..4e188fda59612ed70b98342e8580cd1f311ed141 100644 (file)
@@ -263,7 +263,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
                sp = (unsigned long) ka->sa.sa_restorer;
        }
 
-       if (fpu->fpstate_active) {
+       if (fpu->initialized) {
                sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
                                          &buf_fx, &math_size);
                *fpstate = (void __user *)sp;
@@ -279,7 +279,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
                return (void __user *)-1L;
 
        /* save i387 and extended state */
-       if (fpu->fpstate_active &&
+       if (fpu->initialized &&
            copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size) < 0)
                return (void __user *)-1L;
 
@@ -755,7 +755,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
                /*
                 * Ensure the signal handler starts with the new fpu state.
                 */
-               if (fpu->fpstate_active)
+               if (fpu->initialized)
                        fpu__clear(fpu);
        }
        signal_setup_done(failed, ksig, stepping);
index 4d24269c071f4962ddcddf1e939d79c8e5583975..d7bc0eea20a5ed2fc8ec43ebc06429517cbb362b 100644 (file)
@@ -44,7 +44,7 @@ int __execute_only_pkey(struct mm_struct *mm)
         */
        preempt_disable();
        if (!need_to_set_mm_pkey &&
-           current->thread.fpu.fpstate_active &&
+           current->thread.fpu.initialized &&
            !__pkru_allows_read(read_pkru(), execute_only_pkey)) {
                preempt_enable();
                return execute_only_pkey;