[S390] percpu: implement arch specific irqsafe_cpu_ops
author Heiko Carstens <heiko.carstens@de.ibm.com>
Mon, 23 May 2011 08:24:32 +0000 (10:24 +0200)
committer Martin Schwidefsky <schwidefsky@de.ibm.com>
Mon, 23 May 2011 08:24:29 +0000 (10:24 +0200)
Implement arch-specific irqsafe_cpu ops. The arch-specific ops do not
disable/enable interrupts, since that is an expensive operation. Instead
we disable preemption and perform a compare-and-swap loop.
Since preemption is disabled on server distros (the ones we care about),
the preempt_disable()/preempt_enable() pair is a no-op.
In the end this code should be faster than the generic implementation.
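
As an illustration only (not part of the patch), the pattern boils down
to the following open-coded form for a hypothetical per-cpu counter;
the names below are made up for the example:

	#include <linux/percpu.h>
	#include <linux/preempt.h>
	#include <asm/cmpxchg.h>

	/* hypothetical per-cpu counter, for illustration only */
	static DEFINE_PER_CPU(unsigned long, demo_count);

	static void demo_add(unsigned long val)
	{
		unsigned long old, new, prev, *ptr;

		preempt_disable();
		ptr = __this_cpu_ptr(&demo_count);
		prev = *ptr;
		do {
			old = prev;
			new = old + val;
			/* if an interrupt changed *ptr in between, cmpxchg
			 * fails, returns the current value, and we retry */
			prev = cmpxchg(ptr, old, new);
		} while (prev != old);
		preempt_enable();
	}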

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
arch/s390/include/asm/cmpxchg.h
arch/s390/include/asm/percpu.h

index 7488e52efa97628481e95d6c0dca90e8ed30cd0a..81d7908416cf769202a40541d6a1779560712df5 100644
@@ -167,7 +167,6 @@ static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
 #ifdef CONFIG_64BIT
 #define cmpxchg64(ptr, o, n)                                           \
 ({                                                                     \
-       BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
        cmpxchg((ptr), (o), (n));                                       \
 })
 #else /* CONFIG_64BIT */
index f7ad8719d02dcd211bc18c39806d86a5dceb985a..5325c89a5843b52c73cd0ef36dd6cc67c1a10928 100644
@@ -1,6 +1,9 @@
 #ifndef __ARCH_S390_PERCPU__
 #define __ARCH_S390_PERCPU__
 
+#include <linux/preempt.h>
+#include <asm/cmpxchg.h>
+
 /*
  * s390 uses its own implementation for per cpu data, the offset of
  * the cpu local data area is cached in the cpu's lowcore memory.
 #define ARCH_NEEDS_WEAK_PER_CPU
 #endif
 
+#define arch_irqsafe_cpu_to_op(pcp, val, op)                           \
+do {                                                                   \
+       typedef typeof(pcp) pcp_op_T__;                                 \
+       pcp_op_T__ old__, new__, prev__;                                \
+       pcp_op_T__ *ptr__;                                              \
+       preempt_disable();                                              \
+       ptr__ = __this_cpu_ptr(&(pcp));                                 \
+       prev__ = *ptr__;                                                \
+       do {                                                            \
+               old__ = prev__;                                         \
+               new__ = old__ op (val);                                 \
+               switch (sizeof(*ptr__)) {                               \
+               case 8:                                                 \
+                       prev__ = cmpxchg64(ptr__, old__, new__);        \
+                       break;                                          \
+               default:                                                \
+                       prev__ = cmpxchg(ptr__, old__, new__);          \
+               }                                                       \
+       } while (prev__ != old__);                                      \
+       preempt_enable();                                               \
+} while (0)
+
+#define irqsafe_cpu_add_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
+#define irqsafe_cpu_add_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
+#define irqsafe_cpu_add_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
+#define irqsafe_cpu_add_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, +)
+
+#define irqsafe_cpu_and_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
+#define irqsafe_cpu_and_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
+#define irqsafe_cpu_and_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
+#define irqsafe_cpu_and_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, &)
+
+#define irqsafe_cpu_or_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
+#define irqsafe_cpu_or_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
+#define irqsafe_cpu_or_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
+#define irqsafe_cpu_or_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, |)
+
+#define irqsafe_cpu_xor_1(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
+#define irqsafe_cpu_xor_2(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
+#define irqsafe_cpu_xor_4(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
+#define irqsafe_cpu_xor_8(pcp, val) arch_irqsafe_cpu_to_op(pcp, val, ^)
+
+#define arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)                      \
+({                                                                     \
+       typedef typeof(pcp) pcp_op_T__;                                 \
+       pcp_op_T__ ret__;                                               \
+       pcp_op_T__ *ptr__;                                              \
+       preempt_disable();                                              \
+       ptr__ = __this_cpu_ptr(&(pcp));                                 \
+       switch (sizeof(*ptr__)) {                                       \
+       case 8:                                                         \
+               ret__ = cmpxchg64(ptr__, oval, nval);                   \
+               break;                                                  \
+       default:                                                        \
+               ret__ = cmpxchg(ptr__, oval, nval);                     \
+       }                                                               \
+       preempt_enable();                                               \
+       ret__;                                                          \
+})
+
+#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
+#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
+#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
+#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) arch_irqsafe_cpu_cmpxchg(pcp, oval, nval)
+
 #include <asm-generic/percpu.h>
 
 #endif /* __ARCH_S390_PERCPU__ */
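
For context, callers do not use the size-suffixed macros above directly;
they go through the generic irqsafe_cpu_*() wrappers in linux/percpu.h,
which dispatch on the size of the per-cpu variable. A minimal usage
sketch with a hypothetical statistics counter:

	#include <linux/percpu.h>
	#include <linux/types.h>

	/* hypothetical statistics counter, for illustration only */
	static DEFINE_PER_CPU(u64, rx_bytes);

	static void account_rx(unsigned int len)
	{
		/* updates the local cpu's counter; safe against interrupt
		 * context without an irq disable/enable pair on s390 */
		irqsafe_cpu_add(rx_bytes, len);
	}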