arch/sparc: Enable queued spinlock support for SPARC
Author:     Babu Moger <babu.moger@oracle.com>
Date:       Wed, 24 May 2017 23:55:15 +0000 (17:55 -0600)
Committer:  David S. Miller <davem@davemloft.net>
CommitDate: Thu, 25 May 2017 19:06:52 +0000 (12:06 -0700)
This patch makes the necessary changes to the SPARC architecture to enable
queued spinlock support. Some of the earlier discussions of this feature:
https://lwn.net/Articles/561775/
https://lwn.net/Articles/590243/

Cleaned up spinlock_64.h. The arch_spin_xxx definitions are replaced by
the functions in <asm-generic/qspinlock.h>.
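
With ARCH_USE_QUEUED_SPINLOCKS selected, <asm-generic/qspinlock.h>
supplies the arch_spin_* operations as thin wrappers around the queued
implementation. A minimal sketch of that mapping, assuming the generic
header of this era (the exact macro set may vary between kernel versions):

    /* Simplified excerpt in the spirit of <asm-generic/qspinlock.h>. */
    #define arch_spin_is_locked(l)  queued_spin_is_locked(l)
    #define arch_spin_lock(l)       queued_spin_lock(l)
    #define arch_spin_trylock(l)    queued_spin_trylock(l)
    #define arch_spin_unlock(l)     queued_spin_unlock(l)

arch_spinlock_t itself becomes the generic struct qspinlock, a single
32-bit atomic word, from <asm-generic/qspinlock_types.h>.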

Signed-off-by: Babu Moger <babu.moger@oracle.com>
Reviewed-by: Håkon Bugge <haakon.bugge@oracle.com>
Reviewed-by: Jane Chu <jane.chu@oracle.com>
Reviewed-by: Shannon Nelson <shannon.nelson@oracle.com>
Reviewed-by: Vijay Kumar <vijay.ac.kumar@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
arch/sparc/Kconfig
arch/sparc/include/asm/qspinlock.h [new file with mode: 0644]
arch/sparc/include/asm/spinlock_64.h
arch/sparc/include/asm/spinlock_types.h

index 0e80652075961afa99d155b214a0f9c9db5c9b97..78af684f63b94065288b2a2efbbd0151beac4d73 100644 (file)
@@ -84,6 +84,7 @@ config SPARC64
        select HAVE_NMI
        select HAVE_REGS_AND_STACK_ACCESS_API
        select ARCH_USE_QUEUED_RWLOCKS
+       select ARCH_USE_QUEUED_SPINLOCKS
 
 config ARCH_DEFCONFIG
        string
diff --git a/arch/sparc/include/asm/qspinlock.h b/arch/sparc/include/asm/qspinlock.h
new file mode 100644 (file)
index 0000000..5ae9a28
--- /dev/null
@@ -0,0 +1,7 @@
+#ifndef _ASM_SPARC_QSPINLOCK_H
+#define _ASM_SPARC_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+#include <asm-generic/qspinlock.h>
+
+#endif /* _ASM_SPARC_QSPINLOCK_H */
index 8901c2d4ada9aae37985aea085126c365cc734e5..f7028f5e1a5a3bff399a7a02aa4655f7cad8e012 100644 (file)
 #include <asm/processor.h>
 #include <asm/barrier.h>
 #include <asm/qrwlock.h>
-
-/* To get debugging spinlocks which detect and catch
- * deadlock situations, set CONFIG_DEBUG_SPINLOCK
- * and rebuild your kernel.
- */
-
-/* Because we play games to save cycles in the non-contention case, we
- * need to be extra careful about branch targets into the "spinning"
- * code.  They live in their own section, but the newer V9 branches
- * have a shorter range than the traditional 32-bit sparc branch
- * variants.  The rule is that the branches that go into and out of
- * the spinner sections must be pre-V9 branches.
- */
-
-#define arch_spin_is_locked(lp)        ((lp)->lock != 0)
-
-static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
-{
-       smp_cond_load_acquire(&lock->lock, !VAL);
-}
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-       unsigned long tmp;
-
-       __asm__ __volatile__(
-"1:    ldstub          [%1], %0\n"
-"      brnz,pn         %0, 2f\n"
-"       nop\n"
-"      .subsection     2\n"
-"2:    ldub            [%1], %0\n"
-"      brnz,pt         %0, 2b\n"
-"       nop\n"
-"      ba,a,pt         %%xcc, 1b\n"
-"      .previous"
-       : "=&r" (tmp)
-       : "r" (lock)
-       : "memory");
-}
-
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-       unsigned long result;
-
-       __asm__ __volatile__(
-"      ldstub          [%1], %0\n"
-       : "=r" (result)
-       : "r" (lock)
-       : "memory");
-
-       return (result == 0UL);
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-       __asm__ __volatile__(
-"      stb             %%g0, [%0]"
-       : /* No outputs */
-       : "r" (lock)
-       : "memory");
-}
-
-static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
-{
-       unsigned long tmp1, tmp2;
-
-       __asm__ __volatile__(
-"1:    ldstub          [%2], %0\n"
-"      brnz,pn         %0, 2f\n"
-"       nop\n"
-"      .subsection     2\n"
-"2:    rdpr            %%pil, %1\n"
-"      wrpr            %3, %%pil\n"
-"3:    ldub            [%2], %0\n"
-"      brnz,pt         %0, 3b\n"
-"       nop\n"
-"      ba,pt           %%xcc, 1b\n"
-"       wrpr           %1, %%pil\n"
-"      .previous"
-       : "=&r" (tmp1), "=&r" (tmp2)
-       : "r"(lock), "r"(flags)
-       : "memory");
-}
+#include <asm/qspinlock.h>
 
 #define arch_read_lock_flags(p, f) arch_read_lock(p)
 #define arch_write_lock_flags(p, f) arch_write_lock(p)
index 64fce21b23df48e651a7666aefac9b2dd8182089..bce8ef44dfa99bce688e936c8c9d74c0c215b3e6 100644 (file)
@@ -1,11 +1,16 @@
 #ifndef __SPARC_SPINLOCK_TYPES_H
 #define __SPARC_SPINLOCK_TYPES_H
 
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm-generic/qspinlock_types.h>
+#else
+
 typedef struct {
        volatile unsigned char lock;
 } arch_spinlock_t;
 
 #define __ARCH_SPIN_LOCK_UNLOCKED      { 0 }
+#endif /* CONFIG_QUEUED_SPINLOCKS */
 
 #ifdef CONFIG_QUEUED_RWLOCKS
 #include <asm-generic/qrwlock_types.h>