static inline int __atomic_cmpxchg(int *ptr, int old, int new)
{
- asm volatile(
- " cs %[old],%[new],%[ptr]"
- : [old] "+d" (old), [ptr] "+Q" (*ptr)
- : [new] "d" (new) : "cc", "memory");
- return old;
+ return __sync_val_compare_and_swap(ptr, old, new);
+}
+
+static inline int __atomic_cmpxchg_bool(int *ptr, int old, int new)
+{
+ return __sync_bool_compare_and_swap(ptr, old, new);
}
static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
{
- asm volatile(
- " csg %[old],%[new],%[ptr]"
- : [old] "+d" (old), [ptr] "+Q" (*ptr)
- : [new] "d" (new) : "cc", "memory");
- return old;
+ return __sync_val_compare_and_swap(ptr, old, new);
+}
+
+static inline long __atomic64_cmpxchg_bool(long *ptr, long old, long new)
+{
+ return __sync_bool_compare_and_swap(ptr, old, new);
}
#endif /* __ARCH_S390_ATOMIC_OPS__ */
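
The two GCC builtins the new helpers wrap differ only in what they return: __sync_val_compare_and_swap() always hands back the previous contents of *ptr, while __sync_bool_compare_and_swap() reports whether the swap happened. Both act as full memory barriers, which is why the hand-written cs/csg sequences with their "cc"/"memory" clobbers can simply be dropped. A minimal userspace sketch, not part of the patch:

#include <assert.h>

int main(void)
{
	int v = 5;

	/* value variant: returns the old contents, swapped or not */
	assert(__sync_val_compare_and_swap(&v, 5, 6) == 5);	/* swapped, v == 6 */
	assert(__sync_val_compare_and_swap(&v, 5, 7) == 6);	/* no swap, v == 6 */

	/* bool variant: nonzero only if the swap took place */
	assert(__sync_bool_compare_and_swap(&v, 6, 8));		/* swapped, v == 8 */
	assert(!__sync_bool_compare_and_swap(&v, 6, 9));	/* no swap, v == 8 */
	return 0;
}
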
#define __ASM_SPINLOCK_H
#include <linux/smp.h>
+#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>
extern int spin_retry;
-static inline int
-_raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
-{
- return __sync_bool_compare_and_swap(lock, old, new);
-}
-
#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
* (the type definitions are in asm/spinlock_types.h)
*/
-void arch_lock_relax(unsigned int cpu);
+void arch_lock_relax(int cpu);
void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
{
barrier();
return likely(arch_spin_value_unlocked(*lp) &&
- _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL));
+ __atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}
static inline void arch_spin_lock(arch_spinlock_t *lp)
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
- typecheck(unsigned int, lp->lock);
+ typecheck(int, lp->lock);
asm volatile(
"st %1,%0\n"
: "+Q" (lp->lock)
static inline int arch_read_trylock_once(arch_rwlock_t *rw)
{
- unsigned int old = ACCESS_ONCE(rw->lock);
- return likely((int) old >= 0 &&
- _raw_compare_and_swap(&rw->lock, old, old + 1));
+ int old = ACCESS_ONCE(rw->lock);
+ return likely(old >= 0 &&
+ __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
}
static inline int arch_write_trylock_once(arch_rwlock_t *rw)
{
- unsigned int old = ACCESS_ONCE(rw->lock);
+ int old = ACCESS_ONCE(rw->lock);
return likely(old == 0 &&
- _raw_compare_and_swap(&rw->lock, 0, 0x80000000));
+ __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
}
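
Both fast paths depend on the lock-word encoding that the unsigned-to-signed change makes explicit: bit 31 set (a negative word) means a writer holds the lock, otherwise the word counts readers. Stand-alone equivalents of the two helpers, with hypothetical demo_* names:

static inline int demo_read_trylock(int *lock)
{
	int old = *(volatile int *)lock;

	/* readers may enter only while no writer holds the word */
	return old >= 0 && __sync_bool_compare_and_swap(lock, old, old + 1);
}

static inline int demo_write_trylock(int *lock)
{
	/* a writer needs the word completely free, then sets bit 31 */
	return __sync_bool_compare_and_swap(lock, 0, (int) 0x80000000);
}
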
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
#define __RAW_LOCK(ptr, op_val, op_string) \
({ \
- unsigned int old_val; \
+ int old_val; \
\
- typecheck(unsigned int *, ptr); \
+ typecheck(int *, ptr); \
asm volatile( \
op_string " %0,%2,%1\n" \
"bcr 14,0\n" \
#define __RAW_UNLOCK(ptr, op_val, op_string) \
({ \
- unsigned int old_val; \
+ int old_val; \
\
- typecheck(unsigned int *, ptr); \
+ typecheck(int *, ptr); \
asm volatile( \
op_string " %0,%2,%1\n" \
: "=d" (old_val), "+Q" (*ptr) \
})
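
On z196 and newer, op_string expands to one of the interlocked-access instructions (load-and-add, load-and-or), which apply the operation atomically and return the previous value; the "bcr 14,0" after it is the fast-BCR serialization that only the lock side needs, which is why __RAW_UNLOCK omits it. A rough portable analogue with the __atomic builtins (a sketch, hypothetical names):

static inline int demo_raw_lock_add(int *ptr, int op_val)
{
	/* like laa plus bcr 14,0: fetch-and-add with full ordering */
	return __atomic_fetch_add(ptr, op_val, __ATOMIC_SEQ_CST);
}

static inline int demo_raw_unlock_add(int *ptr, int op_val)
{
	/* releasing needs no serialization */
	return __atomic_fetch_add(ptr, op_val, __ATOMIC_RELEASE);
}
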
extern void _raw_read_lock_wait(arch_rwlock_t *lp);
-extern void _raw_write_lock_wait(arch_rwlock_t *lp, unsigned int prev);
+extern void _raw_write_lock_wait(arch_rwlock_t *lp, int prev);
static inline void arch_read_lock(arch_rwlock_t *rw)
{
- unsigned int old;
+ int old;
old = __RAW_LOCK(&rw->lock, 1, __RAW_OP_ADD);
- if ((int) old < 0)
+ if (old < 0)
_raw_read_lock_wait(rw);
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
- unsigned int old;
+ int old;
old = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
if (old != 0)
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
- unsigned int old;
+ int old;
do {
old = ACCESS_ONCE(rw->lock);
- } while (!_raw_compare_and_swap(&rw->lock, old, old - 1));
+ } while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
}
static inline void arch_write_lock(arch_rwlock_t *rw)
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
- typecheck(unsigned int, rw->lock);
+ typecheck(int, rw->lock);
rw->owner = 0;
asm volatile(
}
__setup("spin_retry=", spin_retry_setup);
-static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
+static inline void compare_and_delay(int *lock, int old)
{
asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
}
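
The .insn directive emits COMPARE AND DELAY by its raw RSY encoding (0xeb...22) because older assemblers lack a mnemonic for it; the instruction lets the CPU stall, up to a model-dependent limit, while *lock still holds the value the spinner last saw, rather than re-polling the contended cache line at full speed. A portable stand-in can only approximate that with bounded polling (hypothetical name, arbitrary bound):

static inline void demo_compare_and_delay(volatile int *lock, int old)
{
	int i;

	/* give the owner time to release before the caller re-reads */
	for (i = 0; i < 64 && *lock == old; i++)
		;
}
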
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
- unsigned int cpu = SPINLOCK_LOCKVAL;
- unsigned int owner;
- int count, first_diag;
+ int cpu = SPINLOCK_LOCKVAL;
+ int owner, count, first_diag;
first_diag = 1;
while (1) {
owner = ACCESS_ONCE(lp->lock);
/* Try to get the lock if it is free. */
if (!owner) {
- if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+ if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
return;
continue;
}
count = spin_retry;
do {
if (MACHINE_HAS_CAD)
- _raw_compare_and_delay(&lp->lock, owner);
+ compare_and_delay(&lp->lock, owner);
owner = ACCESS_ONCE(lp->lock);
} while (owner && count-- > 0);
if (!owner)
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
- unsigned int cpu = SPINLOCK_LOCKVAL;
- unsigned int owner;
- int count, first_diag;
+ int cpu = SPINLOCK_LOCKVAL;
+ int owner, count, first_diag;
local_irq_restore(flags);
first_diag = 1;
/* Try to get the lock if it is free. */
if (!owner) {
local_irq_disable();
- if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+ if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
return;
local_irq_restore(flags);
continue;
count = spin_retry;
do {
if (MACHINE_HAS_CAD)
- _raw_compare_and_delay(&lp->lock, owner);
+ compare_and_delay(&lp->lock, owner);
owner = ACCESS_ONCE(lp->lock);
} while (owner && count-- > 0);
if (!owner)
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
- unsigned int cpu = SPINLOCK_LOCKVAL;
- unsigned int owner;
- int count;
+ int cpu = SPINLOCK_LOCKVAL;
+ int owner, count;
for (count = spin_retry; count > 0; count--) {
owner = READ_ONCE(lp->lock);
/* Try to get the lock if it is free. */
if (!owner) {
- if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+ if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
return 1;
} else if (MACHINE_HAS_CAD)
- _raw_compare_and_delay(&lp->lock, owner);
+ compare_and_delay(&lp->lock, owner);
}
return 0;
}
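
The shape of arch_spin_trylock_retry() is a bounded polling loop: try the compare-and-swap while the lock looks free, pace the polling with compare-and-delay otherwise, and return 0 so the generic layer falls back to the slow path. The same pattern with pthreads primitives, purely as illustration:

#include <pthread.h>

static int demo_trylock_retry(pthread_spinlock_t *lp, int retries)
{
	while (retries-- > 0)
		if (pthread_spin_trylock(lp) == 0)
			return 1;	/* acquired */
	return 0;			/* caller takes the slow path */
}
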
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
- unsigned int owner, old;
int count = spin_retry;
+ int owner, old;
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
}
old = ACCESS_ONCE(rw->lock);
owner = ACCESS_ONCE(rw->owner);
- if ((int) old < 0) {
+ if (old < 0) {
if (MACHINE_HAS_CAD)
- _raw_compare_and_delay(&rw->lock, old);
+ compare_and_delay(&rw->lock, old);
continue;
}
- if (_raw_compare_and_swap(&rw->lock, old, old + 1))
+ if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
return;
}
}
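
The reader slow path retries until the write bit is clear and the reader count can be bumped in a single atomic step. The core loop in portable C11 atomics, leaving out the spin_retry/compare-and-delay pacing (hypothetical name):

#include <stdatomic.h>

static void demo_read_lock_wait(_Atomic int *lock)
{
	int old;

	for (;;) {
		old = atomic_load(lock);
		if (old < 0)
			continue;	/* writer active, keep waiting */
		if (atomic_compare_exchange_weak(lock, &old, old + 1))
			return;		/* reader count bumped */
	}
}
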
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
- unsigned int old;
int count = spin_retry;
+ int old;
while (count-- > 0) {
old = ACCESS_ONCE(rw->lock);
- if ((int) old < 0) {
+ if (old < 0) {
if (MACHINE_HAS_CAD)
- _raw_compare_and_delay(&rw->lock, old);
+ compare_and_delay(&rw->lock, old);
continue;
}
- if (_raw_compare_and_swap(&rw->lock, old, old + 1))
+ if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
return 1;
}
return 0;
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
-void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
+void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
{
- unsigned int owner, old;
int count = spin_retry;
+ int owner, old;
owner = 0;
while (1) {
old = ACCESS_ONCE(rw->lock);
owner = ACCESS_ONCE(rw->owner);
smp_mb();
- if ((int) old >= 0) {
+ if (old >= 0) {
prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
old = prev;
}
- if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
+ if ((old & 0x7fffffff) == 0 && prev >= 0)
break;
if (MACHINE_HAS_CAD)
- _raw_compare_and_delay(&rw->lock, old);
+ compare_and_delay(&rw->lock, old);
}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
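
The z196 writer path claims bit 31 with an interlocked OR and then waits for the reader count to drain; prev remembers whether another writer already owned the bit at the moment we set it, and since the previous owner's unlock wipes the whole word, the bit must be re-claimed when the word turns non-negative. A compressed C11 rendition of that logic, without the retry and owner bookkeeping (hypothetical name):

#include <stdatomic.h>

static void demo_write_lock_wait(_Atomic int *lock)
{
	int prev, old;

	prev = old = atomic_fetch_or(lock, (int) 0x80000000);
	for (;;) {
		if ((old & 0x7fffffff) == 0 && prev >= 0)
			return;	/* no readers left and the bit was ours */
		old = atomic_load(lock);
		if (old >= 0)	/* an unlock wiped the word, claim again */
			prev = old = atomic_fetch_or(lock, (int) 0x80000000);
	}
}
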
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
- unsigned int owner, old, prev;
int count = spin_retry;
+ int owner, old, prev;
prev = 0x80000000;
owner = 0;
}
old = ACCESS_ONCE(rw->lock);
owner = ACCESS_ONCE(rw->owner);
- if ((int) old >= 0 &&
- _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
+ if (old >= 0 &&
+ __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
prev = old;
else
smp_mb();
- if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
+ if ((old & 0x7fffffff) == 0 && prev >= 0)
break;
if (MACHINE_HAS_CAD)
- _raw_compare_and_delay(&rw->lock, old);
+ compare_and_delay(&rw->lock, old);
}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
- unsigned int old;
int count = spin_retry;
+ int old;
while (count-- > 0) {
old = ACCESS_ONCE(rw->lock);
if (old) {
if (MACHINE_HAS_CAD)
- _raw_compare_and_delay(&rw->lock, old);
+ compare_and_delay(&rw->lock, old);
continue;
}
- if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
+ if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
return 1;
}
return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);
-void arch_lock_relax(unsigned int cpu)
+void arch_lock_relax(int cpu)
{
if (!cpu)
return;