Although the lock_spinning calls in the spinlock code are on the
uncommon path, their presence can cause the compiler to generate many
more register saves/restores in the function prologue/epilogue, which
is in the fast path. To avoid this, convert lock_spinning to the pvops
callee-save calling convention, which defers all the saves/restores
until the actual function is called, keeping the fast path clean.
Signed-off-by: Jeremy Fitzhardinge <jeremy@goop.org>
Link: http://lkml.kernel.org/r/1376058122-8248-8-git-send-email-raghavendra.kt@linux.vnet.ibm.com
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Tested-by: Attilio Rao <attilio.rao@citrix.com>
Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
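For context on the machinery used below: PV_CALLEE_SAVE_REGS_THUNK() generates an assembly thunk that saves and restores registers around the C function, and PVOP_VCALLEE2() emits a call site with a minimal clobber list, so the compiler does not have to spill caller-saved registers in the surrounding fast-path function. What follows is a simplified, user-space sketch of the wiring only (the wrapped-pointer struct and the assignment helpers); the names, the `struct callee_save` stand-in, and `guest_lock_spinning` are illustrative assumptions, not the kernel's actual macro definitions.

```c
#include <stdio.h>

typedef unsigned short ticket_t;
struct arch_spinlock;                    /* opaque for this sketch */

/*
 * Analogue of the kernel's struct paravirt_callee_save: wrapping the
 * pointer in a struct keeps the type system from mixing callee-save
 * ops (reached through a register-preserving thunk) with ordinary
 * C-ABI function pointers.
 */
struct callee_save {
	void (*thunk)(struct arch_spinlock *lock, ticket_t ticket);
};

/*
 * Stand-in for __PV_IS_CALLEE_SAVE()/PV_CALLEE_SAVE(): in the real
 * kernel, PV_CALLEE_SAVE() points at an asm thunk emitted by
 * PV_CALLEE_SAVE_REGS_THUNK(func); here it just wraps the function.
 */
#define IS_CALLEE_SAVE(fn) ((struct callee_save){ .thunk = (fn) })

struct lock_ops {
	struct callee_save lock_spinning;    /* slow path, callee-save */
	void (*unlock_kick)(struct arch_spinlock *lock, ticket_t ticket);
};

/* No-op default, playing the role of paravirt_nop on bare metal. */
static void nop_op(struct arch_spinlock *lock, ticket_t ticket)
{
	(void)lock; (void)ticket;
}

static struct lock_ops lock_ops = {
	.lock_spinning = IS_CALLEE_SAVE(nop_op),
	.unlock_kick   = nop_op,
};

/* What a hypervisor backend (e.g. the Xen code in the patch) installs. */
static void guest_lock_spinning(struct arch_spinlock *lock, ticket_t ticket)
{
	(void)lock;
	printf("blocking until ticket %u is released\n", ticket);
}

int main(void)
{
	lock_ops.lock_spinning = IS_CALLEE_SAVE(guest_lock_spinning);

	/* The ticket-lock fast path would only reach this on contention. */
	lock_ops.lock_spinning.thunk(NULL, 7);
	return 0;
}
```

In the real patch, the struct field change (below) is what forces every call site to go through PVOP_VCALLEE2 and every assignment to go through PV_CALLEE_SAVE()/__PV_IS_CALLEE_SAVE(), so a plain function pointer can no longer be installed by accident.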
static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
__ticket_t ticket)
{
- PVOP_VCALL2(pv_lock_ops.lock_spinning, lock, ticket);
+ PVOP_VCALLEE2(pv_lock_ops.lock_spinning, lock, ticket);
}
static __always_inline void ____ticket_unlock_kick(struct arch_spinlock *lock,
#endif
struct pv_lock_ops {
- void (*lock_spinning)(struct arch_spinlock *lock, __ticket_t ticket);
+ struct paravirt_callee_save lock_spinning;
void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
};
struct pv_lock_ops pv_lock_ops = {
#ifdef CONFIG_SMP
- .lock_spinning = paravirt_nop,
+ .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
.unlock_kick = paravirt_nop,
#endif
};
local_irq_restore(flags);
spin_time_accum_blocked(start);
}
+PV_CALLEE_SAVE_REGS_THUNK(xen_lock_spinning);
static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
{
return;
}
- pv_lock_ops.lock_spinning = xen_lock_spinning;
+ pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
pv_lock_ops.unlock_kick = xen_unlock_kick;
}