locking/core, x86/paravirt: Implement vcpu_is_preempted(cpu) for KVM and Xen guests
author	Pan Xinhui <xinhui.pan@linux.vnet.ibm.com>
Wed, 2 Nov 2016 09:08:33 +0000 (05:08 -0400)
committer	Ingo Molnar <mingo@kernel.org>
Tue, 22 Nov 2016 11:48:07 +0000 (12:48 +0100)
Optimize spinlock and mutex busy-loops by providing a vcpu_is_preempted(cpu)
function on KVM and Xen platforms.

Extend the pv_lock_ops interface accordingly and implement the callbacks
on KVM and Xen.
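
For illustration, a busy-wait loop would use the new helper roughly like
this (a sketch of the intended use, not part of this patch; lock, owner
and owner_running() stand in for the mutex/rwsem owner-spinning code):

	while (owner_running(lock, owner)) {
		/* Stop burning cycles once the holder's vCPU is scheduled out. */
		if (need_resched() || vcpu_is_preempted(task_cpu(owner)))
			break;
		cpu_relax();
	}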

Signed-off-by: Pan Xinhui <xinhui.pan@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
[ Translated to English. ]
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Cc: David.Laight@ACULAB.COM
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: benh@kernel.crashing.org
Cc: boqun.feng@gmail.com
Cc: borntraeger@de.ibm.com
Cc: bsingharora@gmail.com
Cc: dave@stgolabs.net
Cc: jgross@suse.com
Cc: kernellwp@gmail.com
Cc: konrad.wilk@oracle.com
Cc: linuxppc-dev@lists.ozlabs.org
Cc: mpe@ellerman.id.au
Cc: paulmck@linux.vnet.ibm.com
Cc: paulus@samba.org
Cc: rkrcmar@redhat.com
Cc: virtualization@lists.linux-foundation.org
Cc: will.deacon@arm.com
Cc: xen-devel-request@lists.xenproject.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1478077718-37424-7-git-send-email-xinhui.pan@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/paravirt_types.h
arch/x86/include/asm/spinlock.h
arch/x86/kernel/paravirt-spinlocks.c

index 0f400c0e49795a78ed26fc3230fe6277a443aa48..38c3bb74740f378fced83d0b0813149db6aba0fb 100644 (file)
@@ -310,6 +310,8 @@ struct pv_lock_ops {
 
        void (*wait)(u8 *ptr, u8 val);
        void (*kick)(int cpu);
+
+       bool (*vcpu_is_preempted)(int cpu);
 };
 
 /* This contains all the paravirt structures: we get a convenient
index 921bea7a2708e2017f12184e09cde6203d6cf0e2..0526f596e399b493e63ab6df74e72c18ab82af71 100644 (file)
 extern struct static_key paravirt_ticketlocks_enabled;
 static __always_inline bool static_key_false(struct static_key *key);
 
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(int cpu)
+{
+       return pv_lock_ops.vcpu_is_preempted(cpu);
+}
+#endif
+
 #include <asm/qspinlock.h>
 
 /*
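
[ The "#define vcpu_is_preempted vcpu_is_preempted" guard above lets
  generic code detect that the architecture provides its own helper.
  Elsewhere in this series the common fallback in include/linux/sched.h
  reads roughly:

	#ifndef vcpu_is_preempted
	# define vcpu_is_preempted(cpu)	false
	#endif

  so code built without this override simply sees "never preempted". ]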
index 2c55a003b79318668d6806d891d3817ce8409130..2f204dd552a40c45812df4dd28e2394a0d764a25 100644 (file)
@@ -21,12 +21,18 @@ bool pv_is_native_spin_unlock(void)
                __raw_callee_save___native_queued_spin_unlock;
 }
 
+static bool native_vcpu_is_preempted(int cpu)
+{
+       return false;
+}
+
 struct pv_lock_ops pv_lock_ops = {
 #ifdef CONFIG_SMP
        .queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
        .queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
        .wait = paravirt_nop,
        .kick = paravirt_nop,
+       .vcpu_is_preempted = native_vcpu_is_preempted,
 #endif /* SMP */
 };
 EXPORT_SYMBOL(pv_lock_ops);
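
[ For reference, a hypervisor backend wires up the callback at init
  time. A minimal sketch, assuming a steal-time style per-vCPU
  "preempted" flag shared by the hypervisor (hv_steal_time and its
  preempted field are hypothetical stand-ins; KVM's real callback reads
  a similar flag from its per-cpu steal-time area):

	static bool hv_vcpu_is_preempted(int cpu)
	{
		/* The hypervisor sets .preempted while the vCPU is off a pCPU. */
		return !!per_cpu(hv_steal_time, cpu).preempted;
	}

	/* During guest SMP init: */
	pv_lock_ops.vcpu_is_preempted = hv_vcpu_is_preempted;
]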