powerpc: Move part of giveup_fpu, altivec, spe into C
author     Anton Blanchard <anton@samba.org>
           Thu, 29 Oct 2015 00:44:01 +0000 (11:44 +1100)
committer  Michael Ellerman <mpe@ellerman.id.au>
           Tue, 1 Dec 2015 02:52:25 +0000 (13:52 +1100)
Move the MSR modification into new C functions. Removing it from
the low-level assembly functions will allow us to avoid costly MSR
writes by batching them up.

Move the check_if_tm_restore_required() check into these new functions.
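
The new C wrappers in process.c all follow the same pattern: read the MSR,
OR in the bit(s) for the unit being given up, write the MSR only if it
actually changed, then call the stripped-down assembly routine. The sketch
below is not part of this patch; it is a hypothetical caller illustrating
the kind of batching the changelog refers to, assuming mtmsr_isync() is the
MSR-write-plus-isync helper used by the new wrappers:

    /*
     * Hypothetical example (not in this patch): give up both FP and VMX
     * state with a single MSR write by raising MSR_FP and MSR_VEC together
     * before calling the low-level save routines.
     */
    static void giveup_fpu_and_altivec(struct task_struct *tsk)
    {
            u64 oldmsr = mfmsr();
            u64 newmsr = oldmsr | MSR_FP | MSR_VEC;

            check_if_tm_restore_required(tsk);

            if (oldmsr != newmsr)
                    mtmsr_isync(newmsr);    /* one MSR write covers both units */

            __giveup_fpu(tsk);
            __giveup_altivec(tsk);
    }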

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/switch_to.h
arch/powerpc/kernel/fpu.S
arch/powerpc/kernel/head_fsl_booke.S
arch/powerpc/kernel/ppc_ksyms.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/vector.S

diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 042aaf05a7870c8ade08b7913710bf4a96876375..c2678b93bcbacdbc545e37abd740215197140aaf 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -23,28 +23,27 @@ extern int emulate_altivec(struct pt_regs *);
 extern void __giveup_vsx(struct task_struct *);
 extern void giveup_vsx(struct task_struct *);
 extern void enable_kernel_spe(void);
-extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
 extern void switch_booke_debug_regs(struct debug_reg *new_debug);
 
 #ifdef CONFIG_PPC_FPU
 extern void flush_fp_to_thread(struct task_struct *);
 extern void giveup_fpu(struct task_struct *);
+extern void __giveup_fpu(struct task_struct *);
 #else
 static inline void flush_fp_to_thread(struct task_struct *t) { }
 static inline void giveup_fpu(struct task_struct *t) { }
+static inline void __giveup_fpu(struct task_struct *t) { }
 #endif
 
 #ifdef CONFIG_ALTIVEC
 extern void flush_altivec_to_thread(struct task_struct *);
 extern void giveup_altivec(struct task_struct *);
+extern void __giveup_altivec(struct task_struct *);
 #else
-static inline void flush_altivec_to_thread(struct task_struct *t)
-{
-}
-static inline void giveup_altivec(struct task_struct *t)
-{
-}
+static inline void flush_altivec_to_thread(struct task_struct *t) { }
+static inline void giveup_altivec(struct task_struct *t) { }
+static inline void __giveup_altivec(struct task_struct *t) { }
 #endif
 
 #ifdef CONFIG_VSX
@@ -57,10 +56,12 @@ static inline void flush_vsx_to_thread(struct task_struct *t)
 
 #ifdef CONFIG_SPE
 extern void flush_spe_to_thread(struct task_struct *);
+extern void giveup_spe(struct task_struct *);
+extern void __giveup_spe(struct task_struct *);
 #else
-static inline void flush_spe_to_thread(struct task_struct *t)
-{
-}
+static inline void flush_spe_to_thread(struct task_struct *t) { }
+static inline void giveup_spe(struct task_struct *t) { }
+static inline void __giveup_spe(struct task_struct *t) { }
 #endif
 
 static inline void clear_task_ebb(struct task_struct *t)
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 71bdce284ad935992f75a3fc529660610187454d..431ab571ed1b1ec8cb859ddfa8bb8468e162a201 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -155,24 +155,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        blr
 
 /*
- * giveup_fpu(tsk)
+ * __giveup_fpu(tsk)
  * Disable FP for the task given as the argument,
  * and save the floating-point registers in its thread_struct.
  * Enables the FPU for use in the kernel on return.
  */
-_GLOBAL(giveup_fpu)
-       mfmsr   r5
-       ori     r5,r5,MSR_FP
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
-       oris    r5,r5,MSR_VSX@h
-END_FTR_SECTION_IFSET(CPU_FTR_VSX)
-#endif
-       SYNC_601
-       ISYNC_601
-       MTMSRD(r5)                      /* enable use of fpu now */
-       SYNC_601
-       isync
+_GLOBAL(__giveup_fpu)
        addi    r3,r3,THREAD            /* want THREAD of task */
        PPC_LL  r6,THREAD_FPSAVEAREA(r3)
        PPC_LL  r5,PT_REGS(r3)
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index d6980bbae95444b83963eeba2067b6bc4cfd12dd..f705171b924b9389c5b16979ce52fd90171ecffa 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -984,14 +984,10 @@ _GLOBAL(__setup_ehv_ivors)
 
 #ifdef CONFIG_SPE
 /*
- * extern void giveup_spe(struct task_struct *prev)
+ * extern void __giveup_spe(struct task_struct *prev)
  *
  */
-_GLOBAL(giveup_spe)
-       mfmsr   r5
-       oris    r5,r5,MSR_SPE@h
-       mtmsr   r5                      /* enable use of SPE now */
-       isync
+_GLOBAL(__giveup_spe)
        addi    r3,r3,THREAD            /* want THREAD of task */
        lwz     r5,PT_REGS(r3)
        cmpi    0,r5,0
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 202963ee013a81c76b62dcbd6bc59c0c8cbc9844..41e1607e800caf9ff5277f08609dab49566123e6 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -19,13 +19,11 @@ EXPORT_SYMBOL(_mcount);
 #endif
 
 #ifdef CONFIG_PPC_FPU
-EXPORT_SYMBOL(giveup_fpu);
 EXPORT_SYMBOL(load_fp_state);
 EXPORT_SYMBOL(store_fp_state);
 #endif
 
 #ifdef CONFIG_ALTIVEC
-EXPORT_SYMBOL(giveup_altivec);
 EXPORT_SYMBOL(load_vr_state);
 EXPORT_SYMBOL(store_vr_state);
 #endif
@@ -34,10 +32,6 @@ EXPORT_SYMBOL(store_vr_state);
 EXPORT_SYMBOL_GPL(__giveup_vsx);
 #endif
 
-#ifdef CONFIG_SPE
-EXPORT_SYMBOL(giveup_spe);
-#endif
-
 #ifdef CONFIG_EPAPR_PARAVIRT
 EXPORT_SYMBOL(epapr_hypercall_start);
 #endif
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 5bf8ec2597d42aebcc117199f7419183b72fc74e..6bcf82bed6107d660e1a0915c86b3a39894531fa 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -88,6 +88,25 @@ static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
 #ifdef CONFIG_PPC_FPU
+void giveup_fpu(struct task_struct *tsk)
+{
+       u64 oldmsr = mfmsr();
+       u64 newmsr;
+
+       check_if_tm_restore_required(tsk);
+
+       newmsr = oldmsr | MSR_FP;
+#ifdef CONFIG_VSX
+       if (cpu_has_feature(CPU_FTR_VSX))
+               newmsr |= MSR_VSX;
+#endif
+       if (oldmsr != newmsr)
+               mtmsr_isync(newmsr);
+
+       __giveup_fpu(tsk);
+}
+EXPORT_SYMBOL(giveup_fpu);
+
 /*
  * Make sure the floating-point register state in the
  * the thread_struct is up to date for task tsk.
@@ -113,7 +132,6 @@ void flush_fp_to_thread(struct task_struct *tsk)
                         * to still have its FP state in the CPU registers.
                         */
                        BUG_ON(tsk != current);
-                       check_if_tm_restore_required(tsk);
                        giveup_fpu(tsk);
                }
                preempt_enable();
@@ -127,7 +145,6 @@ void enable_kernel_fp(void)
        WARN_ON(preemptible());
 
        if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
-               check_if_tm_restore_required(current);
                giveup_fpu(current);
        } else {
                u64 oldmsr = mfmsr();
@@ -139,12 +156,26 @@ void enable_kernel_fp(void)
 EXPORT_SYMBOL(enable_kernel_fp);
 
 #ifdef CONFIG_ALTIVEC
+void giveup_altivec(struct task_struct *tsk)
+{
+       u64 oldmsr = mfmsr();
+       u64 newmsr;
+
+       check_if_tm_restore_required(tsk);
+
+       newmsr = oldmsr | MSR_VEC;
+       if (oldmsr != newmsr)
+               mtmsr_isync(newmsr);
+
+       __giveup_altivec(tsk);
+}
+EXPORT_SYMBOL(giveup_altivec);
+
 void enable_kernel_altivec(void)
 {
        WARN_ON(preemptible());
 
        if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
-               check_if_tm_restore_required(current);
                giveup_altivec(current);
        } else {
                u64 oldmsr = mfmsr();
@@ -165,7 +196,6 @@ void flush_altivec_to_thread(struct task_struct *tsk)
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VEC) {
                        BUG_ON(tsk != current);
-                       check_if_tm_restore_required(tsk);
                        giveup_altivec(tsk);
                }
                preempt_enable();
@@ -214,6 +244,20 @@ EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
 #endif /* CONFIG_VSX */
 
 #ifdef CONFIG_SPE
+void giveup_spe(struct task_struct *tsk)
+{
+       u64 oldmsr = mfmsr();
+       u64 newmsr;
+
+       check_if_tm_restore_required(tsk);
+
+       newmsr = oldmsr | MSR_SPE;
+       if (oldmsr != newmsr)
+               mtmsr_isync(newmsr);
+
+       __giveup_spe(tsk);
+}
+EXPORT_SYMBOL(giveup_spe);
 
 void enable_kernel_spe(void)
 {
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index b31528c302537261e63448bd0e8ddb41b5459b52..6e925b40a484ebf034fad75eb4848f72adf99a4f 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -112,17 +112,11 @@ _GLOBAL(load_up_altivec)
        blr
 
 /*
- * giveup_altivec(tsk)
+ * __giveup_altivec(tsk)
  * Disable VMX for the task given as the argument,
  * and save the vector registers in its thread_struct.
- * Enables the VMX for use in the kernel on return.
  */
-_GLOBAL(giveup_altivec)
-       mfmsr   r5
-       oris    r5,r5,MSR_VEC@h
-       SYNC
-       MTMSRD(r5)                      /* enable use of VMX now */
-       isync
+_GLOBAL(__giveup_altivec)
        addi    r3,r3,THREAD            /* want THREAD of task */
        PPC_LL  r7,THREAD_VRSAVEAREA(r3)
        PPC_LL  r5,PT_REGS(r3)