From 3f6aa0b113846a8628baa649af422cfc6fb1d786 Mon Sep 17 00:00:00 2001
From: Mikael Pettersson <mikpe@it.uu.se>
Date: Mon, 15 Aug 2011 10:11:50 +0000
Subject: [PATCH] sparc32: unbreak arch_write_unlock()

The sparc32 version of arch_write_unlock() is just a plain assignment.
Unfortunately this allows the compiler to schedule side-effects in a
protected region to occur after the HW-level unlock, which is broken.
E.g., the following trivial test case gets miscompiled:

	#include <linux/spinlock.h>
	rwlock_t lock;
	int counter;
	void foo(void) { write_lock(&lock); ++counter; write_unlock(&lock); }

Fixed by adding a compiler memory barrier to arch_write_unlock().  The
sparc64 version combines the barrier and assignment into a single asm(),
and implements the operation as a static inline, so that's what I did too.

Compile-tested with sparc32_defconfig + CONFIG_SMP=y.

Signed-off-by: Mikael Pettersson <mikpe@it.uu.se>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc/include/asm/spinlock_32.h | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h
index 5f5b8bf3f50d..bcc98fc35281 100644
--- a/arch/sparc/include/asm/spinlock_32.h
+++ b/arch/sparc/include/asm/spinlock_32.h
@@ -131,6 +131,15 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 	*(volatile __u32 *)&lp->lock = ~0U;
 }
 
+static void inline arch_write_unlock(arch_rwlock_t *lock)
+{
+	__asm__ __volatile__(
+"	st	%%g0, [%0]"
+	: /* no outputs */
+	: "r" (lock)
+	: "memory");
+}
+
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned int val;
@@ -175,8 +184,6 @@ static inline int __arch_read_trylock(arch_rwlock_t *rw)
 	res; \
 })
 
-#define arch_write_unlock(rw)	do { (rw)->lock = 0; } while(0)
-
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 #define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
 #define arch_write_lock_flags(rw, flags) arch_write_lock(rw)
-- 
2.20.1
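
The reordering the patch guards against can be illustrated in user space.
The sketch below is a minimal stand-alone demo, not kernel code: the names
unlock_plain() and unlock_with_barrier() are invented for illustration, and
it uses a separate empty asm with a "memory" clobber rather than folding the
barrier into the unlocking store in one asm() as the patch does.  The point
is the same: the clobber tells the compiler the asm may read or write any
memory, so the protected store to counter cannot be sunk past the unlock.

	/*
	 * Stand-alone illustration of the bug and the fix; not kernel code.
	 * Build with e.g. "gcc -O2 -S demo.c" and compare the generated
	 * assembly for the two unlock variants.
	 */
	#include <stdio.h>

	static unsigned int lock;	/* stand-in for arch_rwlock_t */
	static int counter;		/* side-effect in the critical section */

	/* Broken pattern (the old macro): a plain store.  The compiler is
	 * free to sink earlier stores, such as the one to counter, below it. */
	static inline void unlock_plain(unsigned int *l)
	{
		*l = 0;
	}

	/* Fixed pattern: a compiler barrier before the store.  The "memory"
	 * clobber forbids moving any memory access across the asm in either
	 * direction, so prior accesses stay above the unlocking store. */
	static inline void unlock_with_barrier(unsigned int *l)
	{
		__asm__ __volatile__("" : : : "memory");
		*l = 0;
	}

	int main(void)
	{
		lock = ~0U;			/* pretend the write lock is held */
		++counter;			/* protected update */
		unlock_with_barrier(&lock);	/* counter store cannot leak past this */
		printf("counter = %d, lock = %u\n", counter, lock);
		return 0;
	}

Folding the store into the asm() itself, as the sparc32 and sparc64 versions
do, achieves the same compile-time ordering in a single step.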