parisc: Fix alignment of pa_tlb_lock in assembly on 32-bit SMP kernel
author Helge Deller <deller@gmx.de>
Tue, 2 Jan 2018 19:36:44 +0000 (20:36 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 10 Jan 2018 08:27:12 +0000 (09:27 +0100)
commit 88776c0e70be0290f8357019d844aae15edaa967 upstream.

Qemu for PARISC reported strange failures on a 32-bit SMP parisc kernel:
"Not-handled unaligned insn 0x0e8011d6 and 0x0c2011c9."

Those opcodes decode to the ldcw() assembly instruction, which on 32-bit
requires its operand to be 16-byte aligned to ensure atomicity.
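
For illustration only (not part of this patch): a minimal user-space C
sketch of the round-up-and-mask idea behind the kernel's __ldcw_align()
macro. The struct and function names below are made up for the example.

  #include <stdint.h>

  #define LDCW_ALIGNMENT 16  /* ldcw operand alignment on 32-bit */

  /* Hypothetical lock layout: enough words that one of them is always
   * 16-byte aligned. */
  struct demo_spinlock {
          volatile unsigned int lock[LDCW_ALIGNMENT / sizeof(unsigned int)];
  };

  /* Mirrors the rounding done by __ldcw_align(): round the address of
   * lock[0] up to the next 16-byte boundary. */
  static volatile unsigned int *demo_ldcw_align(struct demo_spinlock *s)
  {
          uintptr_t ret = (uintptr_t)&s->lock[0];

          ret = (ret + LDCW_ALIGNMENT - 1) & ~(uintptr_t)(LDCW_ALIGNMENT - 1);
          return (volatile unsigned int *)ret;
  }

Since lock[] is 16 bytes and at least word-aligned, the rounded-up word
always lies inside the array.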

As it turns out, qemu is correct: our assembly code in entry.S and
pacache.S does not respect the required alignment.

This patch fixes the problem by aligning the lock address in the
assembly code in the same way as our C code already does.
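
As a reference for that equivalence, here is a small C model (names are
illustrative, not kernel symbols) of what the new assembly sequence
computes: load32 of the lock address plus __PA_LDCW_ALIGNMENT-1,
followed by depi clearing the low __PA_LDCW_ALIGN_ORDER bits (PA-RISC
numbers bit 31 as the least significant), i.e. the same round-up to a
16-byte boundary as the C macro.

  #include <assert.h>
  #include <stdint.h>

  #define ALIGNMENT   16UL
  #define ALIGN_ORDER 4       /* log2(ALIGNMENT); bits cleared by depi */

  /* models "load32 addr + ALIGNMENT-1, \reg" then "depi 0,31,ALIGN_ORDER, \reg" */
  static uintptr_t align_lock_addr(uintptr_t lock_addr)
  {
          uintptr_t reg = lock_addr + ALIGNMENT - 1;    /* add 15 */

          reg &= ~((1UL << ALIGN_ORDER) - 1);           /* clear low 4 bits */
          return reg;
  }

  int main(void)
  {
          /* a word-aligned address inside a 16-byte lock array rounds up
           * to the same aligned word that __ldcw_align() would pick */
          assert(align_lock_addr(0x1004) == 0x1010);
          assert(align_lock_addr(0x1010) == 0x1010);
          return 0;
  }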

Signed-off-by: Helge Deller <deller@gmx.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/parisc/include/asm/ldcw.h
arch/parisc/kernel/entry.S
arch/parisc/kernel/pacache.S

index 8121aa6db2ff21ad37510879dd82a3fe7ba7fa29..51bb6b8eade65983a1b05ca0de83657cd00db504 100644
--- a/arch/parisc/include/asm/ldcw.h
+++ b/arch/parisc/include/asm/ldcw.h
@@ -11,6 +11,7 @@
    for the semaphore.  */
 
 #define __PA_LDCW_ALIGNMENT    16
+#define __PA_LDCW_ALIGN_ORDER  4
 #define __ldcw_align(a) ({                                     \
        unsigned long __ret = (unsigned long) &(a)->lock[0];    \
        __ret = (__ret + __PA_LDCW_ALIGNMENT - 1)               \
@@ -28,6 +29,7 @@
    ldcd). */
 
 #define __PA_LDCW_ALIGNMENT    4
+#define __PA_LDCW_ALIGN_ORDER  2
 #define __ldcw_align(a) (&(a)->slock)
 #define __LDCW "ldcw,co"
 
index 623496c117564cdbc7f939dea4ff777e114212ac..5dc831955de5d6046c0fb41f9cb6e700991ca423 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -35,6 +35,7 @@
 #include <asm/pgtable.h>
 #include <asm/signal.h>
 #include <asm/unistd.h>
+#include <asm/ldcw.h>
 #include <asm/thread_info.h>
 
 #include <linux/linkage.h>
 #endif
 
        .import         pa_tlb_lock,data
+       .macro  load_pa_tlb_lock reg
+#if __PA_LDCW_ALIGNMENT > 4
+       load32  PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg
+       depi    0,31,__PA_LDCW_ALIGN_ORDER, \reg
+#else
+       load32  PA(pa_tlb_lock), \reg
+#endif
+       .endm
 
        /* space_to_prot macro creates a prot id from a space id */
 
        .macro          tlb_lock        spc,ptp,pte,tmp,tmp1,fault
 #ifdef CONFIG_SMP
        cmpib,COND(=),n 0,\spc,2f
-       load32          PA(pa_tlb_lock),\tmp
+       load_pa_tlb_lock \tmp
 1:     LDCW            0(\tmp),\tmp1
        cmpib,COND(=)   0,\tmp1,1b
        nop
        /* Release pa_tlb_lock lock. */
        .macro          tlb_unlock1     spc,tmp
 #ifdef CONFIG_SMP
-       load32          PA(pa_tlb_lock),\tmp
+       load_pa_tlb_lock \tmp
        tlb_unlock0     \spc,\tmp
 #endif
        .endm
index a4761b7724060ac6e84a023e4453b85843a249ac..16073f4721184be28c1c8136e6cf41c74c49d577 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -36,6 +36,7 @@
 #include <asm/assembly.h>
 #include <asm/pgtable.h>
 #include <asm/cache.h>
+#include <asm/ldcw.h>
 #include <linux/linkage.h>
 
        .text
@@ -333,8 +334,12 @@ ENDPROC(flush_data_cache_local)
 
        .macro  tlb_lock        la,flags,tmp
 #ifdef CONFIG_SMP
-       ldil            L%pa_tlb_lock,%r1
-       ldo             R%pa_tlb_lock(%r1),\la
+#if __PA_LDCW_ALIGNMENT > 4
+       load32          pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la
+       depi            0,31,__PA_LDCW_ALIGN_ORDER, \la
+#else
+       load32          pa_tlb_lock, \la
+#endif
        rsm             PSW_SM_I,\flags
 1:     LDCW            0(\la),\tmp
        cmpib,<>,n      0,\tmp,3f
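
Not part of the patch, but a possible compile-time sanity check for the
two macros: the depi sequences above only work if __PA_LDCW_ALIGN_ORDER
stays the base-2 log of __PA_LDCW_ALIGNMENT. A self-contained sketch
(values copied from the 32-bit branch of ldcw.h; the check itself is
not in the kernel):

  #define __PA_LDCW_ALIGNMENT   16
  #define __PA_LDCW_ALIGN_ORDER 4

  _Static_assert((1 << __PA_LDCW_ALIGN_ORDER) == __PA_LDCW_ALIGNMENT,
                 "__PA_LDCW_ALIGN_ORDER must be log2(__PA_LDCW_ALIGNMENT)");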