powerpc: Prepare for splitting giveup_{fpu, altivec, vsx} in two
authorCyril Bur <cyrilbur@gmail.com>
Mon, 29 Feb 2016 06:53:48 +0000 (17:53 +1100)
committerMichael Ellerman <mpe@ellerman.id.au>
Wed, 2 Mar 2016 12:34:48 +0000 (23:34 +1100)
This prepares for decoupling the saving of the {fpu,altivec,vsx} registers
from marking {fpu,altivec,vsx} as unused by a thread.

Currently giveup_{fpu,altivec,vsx}() does both; however, task switching can
be optimised if these two operations are decoupled.
save_all() will permit saving the registers to the thread struct while
leaving the facility-enable bits set in the thread's MSR (a standalone
sketch of the distinction follows the file list below).

This patch introduces no functional change.

Signed-off-by: Cyril Bur <cyrilbur@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/reg.h
arch/powerpc/include/asm/switch_to.h
arch/powerpc/kernel/process.c

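For context, the distinction described in the commit message can be shown with a
small standalone C sketch (not kernel code; the demo_* names and the DEMO_MSR_FP
bit are invented for illustration). A giveup-style helper saves the state and
clears the facility bit in the thread's saved MSR, while a save_all()-style
helper only copies the state out and leaves the bit set:

/*
 * Illustrative only -- a userspace sketch, not kernel code; the demo_*
 * names and the DEMO_MSR_FP bit are invented for this example.
 */
#include <stdio.h>

#define DEMO_MSR_FP     0x1UL

struct demo_thread {
        unsigned long msr;      /* facility-enable bits saved for the thread */
        double fp_save;         /* stand-in save area for the FP state */
};

/* giveup-style: save the state and mark the facility as unused */
static void demo_giveup_fpu(struct demo_thread *t, double live_fp)
{
        t->fp_save = live_fp;
        t->msr &= ~DEMO_MSR_FP;
}

/* save_all()-style: save the state but leave the enable bit set */
static void demo_save_fpu(struct demo_thread *t, double live_fp)
{
        t->fp_save = live_fp;
        /* t->msr deliberately left untouched */
}

int main(void)
{
        struct demo_thread t = { .msr = DEMO_MSR_FP };

        demo_save_fpu(&t, 1.5);
        printf("after save:   msr=%#lx\n", t.msr);      /* bit still set */

        demo_giveup_fpu(&t, 1.5);
        printf("after giveup: msr=%#lx\n", t.msr);      /* bit cleared */
        return 0;
}
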
index 11a81bd5dabdb8bbb698c7383394f7d2f8d99227..52ed654d01ba84a5178102f7c0c7b6835ee1325a 100644 (file)
 #define MSR_HV         0
 #endif
 
+/*
+ * To be used in shared book E/book S code; this avoids needing to worry
+ * about book S/book E differences in shared code
+ */
+#ifndef MSR_SPE
+#define MSR_SPE        0
+#endif
+
 #define MSR_VEC                __MASK(MSR_VEC_LG)      /* Enable AltiVec */
 #define MSR_VSX                __MASK(MSR_VSX_LG)      /* Enable VSX */
 #define MSR_POW                __MASK(MSR_POW_LG)      /* Enable Power Management */
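
The point of the fallback define above is that shared code can then test the
SPE bit unconditionally; on builds where the facility does not exist the bit is
0, the test is constant-false, and the compiler drops the branch. A minimal
standalone sketch of the pattern, using invented DEMO_* names rather than the
real MSR constants:

/* Standalone sketch of the fallback-define pattern; DEMO_* names are invented. */
#include <stdio.h>

/* On a build without the SPE facility, define its MSR bit as 0 ... */
#ifndef DEMO_MSR_SPE
#define DEMO_MSR_SPE    0
#endif
#define DEMO_MSR_FP     (1UL << 13)

int main(void)
{
        unsigned long usermsr = DEMO_MSR_FP;

        /*
         * ... so shared code can test it unconditionally; with the bit
         * defined as 0 the condition is constant-false and the compiler
         * drops the branch, no #ifdef needed at the call site.
         */
        if (usermsr & DEMO_MSR_SPE)
                printf("would save SPE state\n");
        else
                printf("SPE branch compiled away on this configuration\n");
        return 0;
}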
index 5b268b6be74c791cadfa2ea8575e298104bd529a..3690041c126a7850a4dc37ec0684b81e04fa2bd5 100644 (file)
@@ -34,6 +34,7 @@ static inline void disable_kernel_fp(void)
        msr_check_and_clear(MSR_FP);
 }
 #else
+static inline void __giveup_fpu(struct task_struct *t) { }
 static inline void flush_fp_to_thread(struct task_struct *t) { }
 #endif
 
@@ -46,6 +47,8 @@ static inline void disable_kernel_altivec(void)
 {
        msr_check_and_clear(MSR_VEC);
 }
+#else
+static inline void __giveup_altivec(struct task_struct *t) { }
 #endif
 
 #ifdef CONFIG_VSX
@@ -57,6 +60,8 @@ static inline void disable_kernel_vsx(void)
 {
        msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
 }
+#else
+static inline void __giveup_vsx(struct task_struct *t) { }
 #endif
 
 #ifdef CONFIG_SPE
@@ -68,6 +73,8 @@ static inline void disable_kernel_spe(void)
 {
        msr_check_and_clear(MSR_SPE);
 }
+#else
+static inline void __giveup_spe(struct task_struct *t) { }
 #endif
 
 static inline void clear_task_ebb(struct task_struct *t)
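
The #else stubs added to switch_to.h follow the usual kernel pattern of
providing an empty static inline when a facility is configured out, so that
callers (such as save_all() below) need no #ifdef. A standalone sketch of that
pattern, with invented demo_* names standing in for the real config option and
helper:

/* Standalone sketch of the empty-stub pattern; the demo_* names are invented. */
#include <stdio.h>

struct demo_task { int altivec_used; };

#ifdef DEMO_CONFIG_ALTIVEC
static inline void demo_giveup_altivec(struct demo_task *t)
{
        t->altivec_used = 0;    /* real work would happen here */
}
#else
/*
 * Empty stub: callers can reference the function unconditionally and the
 * never-taken call is optimised away when the facility is configured out.
 */
static inline void demo_giveup_altivec(struct demo_task *t) { }
#endif

int main(void)
{
        struct demo_task t = { .altivec_used = 1 };

        demo_giveup_altivec(&t);        /* compiles with or without DEMO_CONFIG_ALTIVEC */
        printf("altivec_used = %d\n", t.altivec_used);
        return 0;
}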
index 55c1eb0465af48209b4cad8efe1a4cd36f1cf7a9..29da07fb3b4adb64880a44561d79a3f3e5251bb6 100644 (file)
@@ -444,12 +444,41 @@ void restore_math(struct pt_regs *regs)
        regs->msr = msr;
 }
 
+void save_all(struct task_struct *tsk)
+{
+       unsigned long usermsr;
+
+       if (!tsk->thread.regs)
+               return;
+
+       usermsr = tsk->thread.regs->msr;
+
+       if ((usermsr & msr_all_available) == 0)
+               return;
+
+       msr_check_and_set(msr_all_available);
+
+       if (usermsr & MSR_FP)
+               __giveup_fpu(tsk);
+
+       if (usermsr & MSR_VEC)
+               __giveup_altivec(tsk);
+
+       if (usermsr & MSR_VSX)
+               __giveup_vsx(tsk);
+
+       if (usermsr & MSR_SPE)
+               __giveup_spe(tsk);
+
+       msr_check_and_clear(msr_all_available);
+}
+
 void flush_all_to_thread(struct task_struct *tsk)
 {
        if (tsk->thread.regs) {
                preempt_disable();
                BUG_ON(tsk != current);
-               giveup_all(tsk);
+               save_all(tsk);
 
 #ifdef CONFIG_SPE
                if (tsk->thread.regs->msr & MSR_SPE)
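
One detail worth noting in save_all(): the msr_check_and_set()/
msr_check_and_clear() pair brackets the copies, enabling the facilities in the
kernel's own MSR so their registers can be read and then disabling them again;
the thread's regs->msr is never modified. A standalone sketch of that
structure (the DEMO_* constants and demo_* helpers are invented, and printf
stands in for the real register copies):

/* Standalone sketch of the save_all() structure; all names here are invented. */
#include <stdio.h>

#define DEMO_MSR_FP     (1UL << 13)
#define DEMO_MSR_VEC    (1UL << 25)

static unsigned long demo_msr_all_available = DEMO_MSR_FP | DEMO_MSR_VEC;

static void demo_enable_facilities(unsigned long bits)
{
        printf("enable  %#lx in the kernel MSR\n", bits);
}

static void demo_disable_facilities(unsigned long bits)
{
        printf("disable %#lx in the kernel MSR\n", bits);
}

static void demo_save_all(unsigned long usermsr)
{
        /* Nothing in use by the thread: nothing to save. */
        if ((usermsr & demo_msr_all_available) == 0)
                return;

        /* Turn the facilities on so their registers can be read ... */
        demo_enable_facilities(demo_msr_all_available);

        if (usermsr & DEMO_MSR_FP)
                printf("copy FP registers to the thread struct\n");
        if (usermsr & DEMO_MSR_VEC)
                printf("copy VMX registers to the thread struct\n");

        /*
         * ... then turn them back off; usermsr itself is never modified,
         * which is the difference from a giveup-style helper.
         */
        demo_disable_facilities(demo_msr_all_available);
}

int main(void)
{
        demo_save_all(DEMO_MSR_FP);
        return 0;
}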