Use lowcore constant to improve the code generated for spinlocks.
[ Martin Schwidefsky: patch breakdown and code beautification ]
Signed-off-by: Philipp Hachtmann <phacht@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
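
The lock word encoding itself is unchanged: 0 means unlocked, and a held lock
stores the bitwise complement of the owner's CPU number, so even CPU 0 yields
a non-zero value (~0 == 0xffffffff). What changes is where that value comes
from: instead of deriving ~smp_processor_id() on every lock operation, each
CPU stores the ready-made value in its lowcore once at bring-up, and the fast
path only has to load it through SPINLOCK_LOCKVAL. A minimal, self-contained
sketch of the idea, using stand-in demo_* names rather than the kernel's own
declarations:

	/* Sketch only: illustrative stand-ins, not the kernel's types. */
	typedef unsigned int u32;

	struct demo_lowcore {
		u32 spinlock_lockval;	/* ~cpu, filled in at CPU bring-up */
	};

	/* stands in for the per-CPU, prefix-addressed lowcore */
	static struct demo_lowcore demo_lowcore;

	#define SPINLOCK_LOCKVAL (demo_lowcore.spinlock_lockval)

	typedef struct { u32 lock; } demo_spinlock_t;	/* 0 == unlocked */

	static inline u32 demo_spin_lockval(int cpu)
	{
		return ~cpu;		/* non-zero even for cpu 0 */
	}

	static inline int demo_cas(u32 *lock, u32 old, u32 new)
	{
		return __sync_bool_compare_and_swap(lock, old, new);
	}

	/* fast path: one load of the cached value, no per-attempt ~cpu */
	static inline int demo_spin_trylock(demo_spinlock_t *lp)
	{
		return demo_cas(&lp->lock, 0, SPINLOCK_LOCKVAL);
	}

	static inline int demo_spin_tryrelease(demo_spinlock_t *lp)
	{
		return demo_cas(&lp->lock, SPINLOCK_LOCKVAL, 0);
	}

The setup.c and smp.c hunks below do the real equivalent of filling in
spinlock_lockval, once for the boot CPU and once per secondary CPU.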
__u32 percpu_offset; /* 0x02f0 */
__u32 machine_flags; /* 0x02f4 */
__u32 ftrace_func; /* 0x02f8 */
- __u8 pad_0x02fc[0x0300-0x02fc]; /* 0x02fc */
+ __u32 spinlock_lockval; /* 0x02fc */
/* Interrupt response block */
__u8 irb[64]; /* 0x0300 */
__u64 machine_flags; /* 0x0388 */
__u64 ftrace_func; /* 0x0390 */
__u64 gmap; /* 0x0398 */
- __u8 pad_0x03a0[0x0400-0x03a0]; /* 0x03a0 */
+ __u32 spinlock_lockval; /* 0x03a0 */
+ __u8 pad_0x03a4[0x0400-0x03a4]; /* 0x03a4 */
/* Interrupt response block. */
__u8 irb[64]; /* 0x0400 */
#include <linux/smp.h>
+#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)
+
extern int spin_retry;
static inline int
void arch_spin_relax(arch_spinlock_t *);
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
+static inline u32 arch_spin_lockval(int cpu)
+{
+ return ~cpu;
+}
+
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
return lock.lock == 0;
static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
- unsigned int new = ~smp_processor_id();
-
- return _raw_compare_and_swap(&lp->lock, 0, new);
+ return _raw_compare_and_swap(&lp->lock, 0, SPINLOCK_LOCKVAL);
}
static inline int arch_spin_tryrelease_once(arch_spinlock_t *lp)
{
- unsigned int old = ~smp_processor_id();
-
- return _raw_compare_and_swap(&lp->lock, old, 0);
+ return _raw_compare_and_swap(&lp->lock, SPINLOCK_LOCKVAL, 0);
}
static inline void arch_spin_lock(arch_spinlock_t *lp)
mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);
+#ifdef CONFIG_SMP
+ lc->spinlock_lockval = arch_spin_lockval(0);
+#endif
+
set_prefix((u32)(unsigned long) lc);
lowcore_ptr[0] = lc;
}
lc->panic_stack = pcpu->panic_stack + PAGE_SIZE
	- STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->cpu_nr = cpu;
+ lc->spinlock_lockval = arch_spin_lockval(cpu);
#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE) {
lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL);
cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
atomic_inc(&init_mm.context.attach_count);
lc->cpu_nr = cpu;
+ lc->spinlock_lockval = arch_spin_lockval(cpu);
lc->percpu_offset = __per_cpu_offset[cpu];
lc->kernel_asce = S390_lowcore.kernel_asce;
lc->machine_flags = S390_lowcore.machine_flags;
void __init smp_setup_processor_id(void)
{
S390_lowcore.cpu_nr = 0;
+ S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
}
/*
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
int count = spin_retry;
- unsigned int cpu = ~smp_processor_id();
+ unsigned int cpu = SPINLOCK_LOCKVAL;
unsigned int owner;
while (1) {
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
int count = spin_retry;
- unsigned int cpu = ~smp_processor_id();
+ unsigned int cpu = SPINLOCK_LOCKVAL;
unsigned int owner;
local_irq_restore(flags);
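
The slow-path hunks above only change where the owner value comes from; the
wait loops keep using it the same way. Because a held lock word is the
complement of the holder's CPU number, a spinning CPU can recover the holder
with another complement (the existing loops use the same trick for the
directed yield, e.g. smp_yield_cpu(~owner)). A tiny standalone illustration
of that round trip, not the upstream loop body:

	#include <stdio.h>

	int main(void)
	{
		unsigned int owner_cpu = 5;
		unsigned int lockval = ~owner_cpu;	/* what a held lock word contains */

		/* 0 still means "free"; ~lockval gives back the holding CPU */
		printf("lock word 0x%08x -> holder cpu %u\n", lockval, ~lockval);
		return 0;
	}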