From 6541d98d114ef4ccf923c02ae665262450b3966c Mon Sep 17 00:00:00 2001
From: John David Anglin
Date: Sun, 12 Aug 2018 16:31:17 -0400
Subject: [PATCH] parisc: Remove unnecessary barriers from spinlock.h
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

commit 3b885ac1dc35b87a39ee176a6c7e2af9c789d8b8 upstream.

Now that mb() is an instruction barrier, it will slow performance if we
issue unnecessary barriers.

The spinlock defines have a number of unnecessary barriers.  The
__ldcw() define is both a hardware and compiler barrier.  The mb()
barriers in the routines using __ldcw() serve no purpose.

The only barrier needed is the one in arch_spin_unlock().  We need to
ensure all accesses are complete prior to releasing the lock.

Signed-off-by: John David Anglin
Cc: stable@vger.kernel.org # 4.0+
Signed-off-by: Helge Deller
Signed-off-by: Greg Kroah-Hartman
---
 arch/parisc/include/asm/spinlock.h | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index af03359e6ac5..a82776592c8e 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -20,7 +20,6 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
 {
 	volatile unsigned int *a;
 
-	mb();
 	a = __ldcw_align(x);
 	while (__ldcw(a) == 0)
 		while (*a == 0)
@@ -30,16 +29,15 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
 				local_irq_disable();
 			} else
 				cpu_relax();
-	mb();
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
-	mb();
+
 	a = __ldcw_align(x);
-	*a = 1;
 	mb();
+	*a = 1;
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *x)
@@ -47,10 +45,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
 	volatile unsigned int *a;
 	int ret;
 
-	mb();
 	a = __ldcw_align(x);
 	ret = __ldcw(a) != 0;
-	mb();
 
 	return ret;
 }
-- 
2.20.1
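
The barrier placement this patch ends up with can be illustrated with a
small C11 analogue.  This is only a sketch, not the kernel's PA-RISC
code: the toy_* names are hypothetical, and atomic_exchange_explicit()
stands in for the ldcw instruction, whose load-word-and-clear semantics
and implicit ordering are what make the mb() calls around __ldcw()
redundant.  The single release barrier in toy_spin_unlock() corresponds
to the one mb() the patch keeps.

#include <stdatomic.h>

/* 1 = free, 0 = held, matching the PA-RISC convention where ldcw
 * reads the lock word and atomically clears it. */
typedef struct {
	atomic_uint lock;
} toy_spinlock_t;

/* Hypothetical stand-in for __ldcw(): atomically fetch the word and
 * zero it.  Acquire ordering models ldcw acting as a hardware barrier;
 * the atomic call is also a compiler barrier, so no extra mb() is
 * needed around it. */
static inline unsigned int toy_ldcw(atomic_uint *a)
{
	return atomic_exchange_explicit(a, 0, memory_order_acquire);
}

static inline void toy_spin_lock(toy_spinlock_t *x)
{
	/* No barrier before or after: toy_ldcw() already orders things. */
	while (toy_ldcw(&x->lock) == 0)
		while (atomic_load_explicit(&x->lock,
					    memory_order_relaxed) == 0)
			;	/* spin cheaply until the lock looks free */
}

static inline int toy_spin_trylock(toy_spinlock_t *x)
{
	return toy_ldcw(&x->lock) != 0;
}

static inline void toy_spin_unlock(toy_spinlock_t *x)
{
	/* The one barrier the patch keeps: all critical-section accesses
	 * must complete before the lock word becomes visible as free. */
	atomic_store_explicit(&x->lock, 1, memory_order_release);
}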