powerpc: Avoid load hit store in __giveup_fpu() and __giveup_altivec()
author: Anton Blanchard <anton@samba.org>
date: Sun, 29 May 2016 12:03:50 +0000 (22:03 +1000)
committer: Michael Ellerman <mpe@ellerman.id.au>
commit date: Tue, 14 Jun 2016 03:58:25 +0000 (13:58 +1000)
In both __giveup_fpu() and __giveup_altivec() we make two modifications
to tsk->thread.regs->msr. gcc decides to do a read/modify/write of
each change, so we end up with a load hit store:

        ld      r9,264(r10)
        rldicl  r9,r9,50,1
        rotldi  r9,r9,14
        std     r9,264(r10)
...
        ld      r9,264(r10)
        rldicl  r9,r9,40,1
        rotldi  r9,r9,24
        std     r9,264(r10)

Fix this by using a temporary.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/kernel/process.c

index e2f12cbcade9a49287c370204bcc71a4fb23b33a..a2dd3b1276fffb92de6bf6f94b78e33796951309 100644 (file)
@@ -139,12 +139,16 @@ EXPORT_SYMBOL(__msr_check_and_clear);
 #ifdef CONFIG_PPC_FPU
 void __giveup_fpu(struct task_struct *tsk)
 {
+       unsigned long msr;
+
        save_fpu(tsk);
-       tsk->thread.regs->msr &= ~MSR_FP;
+       msr = tsk->thread.regs->msr;
+       msr &= ~MSR_FP;
 #ifdef CONFIG_VSX
        if (cpu_has_feature(CPU_FTR_VSX))
-               tsk->thread.regs->msr &= ~MSR_VSX;
+               msr &= ~MSR_VSX;
 #endif
+       tsk->thread.regs->msr = msr;
 }
 
 void giveup_fpu(struct task_struct *tsk)
@@ -219,12 +223,16 @@ static int restore_fp(struct task_struct *tsk) { return 0; }
 
 static void __giveup_altivec(struct task_struct *tsk)
 {
+       unsigned long msr;
+
        save_altivec(tsk);
-       tsk->thread.regs->msr &= ~MSR_VEC;
+       msr = tsk->thread.regs->msr;
+       msr &= ~MSR_VEC;
 #ifdef CONFIG_VSX
        if (cpu_has_feature(CPU_FTR_VSX))
-               tsk->thread.regs->msr &= ~MSR_VSX;
+               msr &= ~MSR_VSX;
 #endif
+       tsk->thread.regs->msr = msr;
 }
 
 void giveup_altivec(struct task_struct *tsk)