commit fedb8da96355f5f64353625bf96dc69423ad1826 upstream.
For years I thought all parisc machines executed loads and stores in
order. However, Jeff Law recently indicated on gcc-patches that this is
not correct. There are various degrees of out-of-order execution all the
way back to the PA7xxx processor series (hit-under-miss). The PA8xxx
series has full out-of-order execution for both integer operations and
loads and stores.
This is described in the following article:
http://web.archive.org/web/20040214092531/http://www.cpus.hp.com/technical_references/advperf.shtml
For this reason, we need to define mb() and to insert a memory barrier
before the store that unlocks a spinlock. This ensures that all memory
accesses are complete prior to unlocking. The ldcw instruction performs
the same function on entry.
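As a concrete illustration of the pattern, here is a hypothetical C
sketch of an ldcw-based lock with the barrier-before-store unlock this
patch mandates. The demo_* names and the simplified lock layout are
mine, not the kernel's; the real code uses __ldcw_align() to obtain the
16-byte aligned word that ldcw requires:

  /* Sketch only: ldcw atomically loads the word and stores zero,
   * acting as the barrier on entry; sync orders all prior accesses
   * before the store that frees the lock. */
  typedef struct {
  	volatile unsigned int slot[4] __attribute__((aligned(16)));
  } demo_spinlock_t;

  static inline unsigned int demo_ldcw(volatile unsigned int *a)
  {
  	unsigned int old;
  	__asm__ __volatile__("ldcw 0(%1),%0"
  			     : "=r" (old) : "r" (a) : "memory");
  	return old;
  }

  static inline void demo_lock(demo_spinlock_t *x)
  {
  	while (demo_ldcw(&x->slot[0]) == 0)	/* 0 => already held */
  		while (x->slot[0] == 0)
  			;			/* spin until freed */
  }

  static inline void demo_unlock(demo_spinlock_t *x)
  {
  	__asm__ __volatile__("sync" : : : "memory");	/* mb() */
  	x->slot[0] = 1;			/* store that frees the lock */
  }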
Signed-off-by: John David Anglin <dave.anglin@bell.net>
Cc: stable@vger.kernel.org # 4.0+
Signed-off-by: Helge Deller <deller@gmx.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
--- /dev/null
+++ b/arch/parisc/include/asm/barrier.h
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+/* The synchronize caches instruction executes as a nop on systems in
+ which all memory references are performed in order. */
+#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory")
+
+#if defined(CONFIG_SMP)
+#define mb() do { synchronize_caches(); } while (0)
+#define rmb() mb()
+#define wmb() mb()
+#define dma_rmb() mb()
+#define dma_wmb() mb()
+#else
+#define mb() barrier()
+#define rmb() barrier()
+#define wmb() barrier()
+#define dma_rmb() barrier()
+#define dma_wmb() barrier()
+#endif
+
+#define __smp_mb() mb()
+#define __smp_rmb() mb()
+#define __smp_wmb() mb()
+
+#include <asm-generic/barrier.h>
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __ASM_BARRIER_H */
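Not part of the patch, but for context: a typical kernel use of the
macros this header now backs with a real sync on SMP. The producer and
consumer names and values are illustrative only; READ_ONCE/WRITE_ONCE
and cpu_relax() come from the usual kernel headers:

  static int data;
  static int ready;

  void producer(void)
  {
  	WRITE_ONCE(data, 42);	/* payload first */
  	smp_wmb();		/* expands to sync on SMP parisc */
  	WRITE_ONCE(ready, 1);	/* then publish the flag */
  }

  int consumer(void)
  {
  	while (!READ_ONCE(ready))
  		cpu_relax();
  	smp_rmb();	/* order the flag read before the payload read */
  	return READ_ONCE(data);
  }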
/* Release pa_tlb_lock lock without reloading lock address. */
.macro tlb_unlock0 spc,tmp
#ifdef CONFIG_SMP
+ or,COND(=) %r0,\spc,%r0
+ sync
or,COND(=) %r0,\spc,%r0
stw \spc,0(\tmp)
#endif
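The duplicated or,COND(=) above is deliberate: parisc conditional
nullification skips only the single following instruction, so the
\spc == 0 test must be repeated to guard both the new sync and the
existing store. In rough C terms (a hypothetical analogy, not real
code):

  if (spc != 0)
  	sync();			/* the new barrier */
  if (spc != 0)
  	*tmp = spc;		/* the store that releases the lock */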
.macro tlb_unlock la,flags,tmp
#ifdef CONFIG_SMP
ldi 1,\tmp
+ sync
stw \tmp,0(\la)
mtsm \flags
#endif
sub,<> %r28, %r25, %r0
2: stw,ma %r24, 0(%r26)
/* Free lock */
+ sync
stw,ma %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
/* Clear thread register indicator */
3:
/* Error occurred on load or store */
/* Free lock */
+ sync
stw %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
stw %r0, 4(%sr2,%r20)
cas2_end:
/* Free lock */
+ sync
stw,ma %r20, 0(%sr2,%r20)
/* Enable interrupts */
ssm PSW_SM_I, %r0
22:
/* Error occurred on load or store */
/* Free lock */
+ sync
stw %r20, 0(%sr2,%r20)
ssm PSW_SM_I, %r0
ldo 1(%r0),%r28
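In C11 memory-model terms, each sync-before-store sequence added to the
LWS paths gives the lock-freeing store at least release semantics (sync
is in fact a full barrier, which is stronger). A hypothetical analogue,
with lws_unlock_sketch being my name, not a kernel symbol:

  #include <stdatomic.h>

  static inline void lws_unlock_sketch(unsigned int *lock,
  				       unsigned int token)
  {
  	/* Order all prior accesses before freeing the lock word;
  	 * the real code uses a full "sync" barrier here. */
  	atomic_thread_fence(memory_order_release);
  	*(volatile unsigned int *)lock = token;
  }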