powerpc: Always restore FPU/VEC/VSX if hardware transactional memory in use
author Cyril Bur <cyrilbur@gmail.com>
Fri, 23 Sep 2016 06:18:08 +0000 (16:18 +1000)
committer Michael Ellerman <mpe@ellerman.id.au>
Tue, 4 Oct 2016 05:43:05 +0000 (16:43 +1100)
Comment from arch/powerpc/kernel/process.c:967:
 If userspace is inside a transaction (whether active or
 suspended) and FP/VMX/VSX instructions have ever been enabled
 inside that transaction, then we have to keep them enabled
 and keep the FP/VMX/VSX state loaded while ever the transaction
 continues.  The reason is that if we didn't, and subsequently
 got a FP/VMX/VSX unavailable interrupt inside a transaction,
 we don't know whether it's the same transaction, and thus we
 don't know which of the checkpointed state and the transactional
 state to use.
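
The "whether active or suspended" part of that comment refers to the MSR
transaction state (TS) bits. As a rough, non-authoritative sketch of the
helper this patch adds (the authoritative definitions live in
arch/powerpc/include/asm/reg.h), msr_tm_active() below boils down to:

    /* Sketch only -- see arch/powerpc/include/asm/reg.h for the real masks */
    #define MSR_TS_S         (1UL << 33)                /* transaction suspended     */
    #define MSR_TS_T         (1UL << 34)                /* transaction transactional */
    #define MSR_TS_MASK      (MSR_TS_T | MSR_TS_S)
    #define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* active or suspended       */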

restore_math(), restore_fp() and restore_altivec() currently may not
restore the registers. It doesn't appear that this is more serious
than a performance penalty. If the math registers aren't restored, the
userspace thread will still be run with the facility disabled.
Userspace will not be able to read invalid values. On the first access
it will take a facility unavailable exception and the kernel will
detect an active transaction, at which point it will abort the
transaction. There is the possibility of a pathological case
preventing any progress by transactions; however, transactions
are never guaranteed to make progress.
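
As an illustration of that pathological case, here is a minimal userspace
sketch (assuming GCC's PowerPC HTM builtins and -mhtm; this program is not
part of the patch). If the kernel leaves the FP facility disabled on return
to a thread that is inside a transaction, the floating point add below can
keep triggering a facility unavailable exception and aborting the
transaction on every attempt:

    #include <stdio.h>

    int main(void)
    {
            double total = 0.0;
            int retries = 0;

            for (;;) {
                    if (__builtin_tbegin(0)) {
                            /* FP use inside the transaction: without the fix
                             * this access may fault and abort the transaction
                             * rather than commit it. */
                            total += 1.5;
                            __builtin_tend(0);
                            break;
                    }
                    /* Transaction aborted: retry. Nothing guarantees this
                     * loop ever makes forward progress. */
                    retries++;
            }

            printf("total=%f after %d retries\n", total, retries);
            return 0;
    }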

Fixes: 70fe3d9 ("powerpc: Restore FPU/VEC/VSX if previously used")
Signed-off-by: Cyril Bur <cyrilbur@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/kernel/process.c

index ce8a26a0c947ebc29800316b6f4672d9fd76c274..3846fab5d1ceb01d5f4af4bcff9c5eb1cbe904e3 100644
@@ -89,7 +89,13 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
                set_thread_flag(TIF_RESTORE_TM);
        }
 }
+
+static inline bool msr_tm_active(unsigned long msr)
+{
+       return MSR_TM_ACTIVE(msr);
+}
 #else
+static inline bool msr_tm_active(unsigned long msr) { return false; }
 static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
@@ -209,7 +215,7 @@ void enable_kernel_fp(void)
 EXPORT_SYMBOL(enable_kernel_fp);
 
 static int restore_fp(struct task_struct *tsk) {
-       if (tsk->thread.load_fp) {
+       if (tsk->thread.load_fp || msr_tm_active(tsk->thread.regs->msr)) {
                load_fp_state(&current->thread.fp_state);
                current->thread.load_fp++;
                return 1;
@@ -279,7 +285,8 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 
 static int restore_altivec(struct task_struct *tsk)
 {
-       if (cpu_has_feature(CPU_FTR_ALTIVEC) && tsk->thread.load_vec) {
+       if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
+               (tsk->thread.load_vec || msr_tm_active(tsk->thread.regs->msr))) {
                load_vr_state(&tsk->thread.vr_state);
                tsk->thread.used_vr = 1;
                tsk->thread.load_vec++;
@@ -465,7 +472,8 @@ void restore_math(struct pt_regs *regs)
 {
        unsigned long msr;
 
-       if (!current->thread.load_fp && !loadvec(current->thread))
+       if (!msr_tm_active(regs->msr) &&
+               !current->thread.load_fp && !loadvec(current->thread))
                return;
 
        msr = regs->msr;
@@ -984,6 +992,13 @@ void restore_tm_state(struct pt_regs *regs)
        msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
        msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
 
+       /* Ensure that restore_math() will restore */
+       if (msr_diff & MSR_FP)
+               current->thread.load_fp = 1;
+#ifdef CONFIG_ALTIVEC
+       if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
+               current->thread.load_vec = 1;
+#endif
        restore_math(regs);
 
        regs->msr |= msr_diff;