x86/paravirt: Optimize native pv_lock_ops.vcpu_is_preempted()
author Peter Zijlstra <peterz@infradead.org>
Tue, 15 Nov 2016 15:47:06 +0000 (16:47 +0100)
committer Ingo Molnar <mingo@kernel.org>
Tue, 22 Nov 2016 11:48:11 +0000 (12:48 +0100)
Avoid the pointless function call to pv_lock_ops.vcpu_is_preempted()
when a kernel built with paravirt spinlocks enabled runs on native hardware.

Do this by patching out the CALL instruction, replacing it with "XOR %RAX,%RAX",
which has the same effect (a zero return value).
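
For illustration only, here is a minimal user-space sketch (plain C with inline
asm, x86-64 only, not kernel code; the helper name is made up for the example)
of why the substitution is behaviour-preserving: on native hardware the callee
always returns false, and clearing RAX at the call site yields the same result
without making the call.

  #include <stdbool.h>
  #include <stdio.h>

  /* Stand-in for the native callee: a native CPU is never "preempted". */
  static bool native_vcpu_is_preempted(int cpu)
  {
          (void)cpu;
          return false;
  }

  int main(void)
  {
          bool via_call = native_vcpu_is_preempted(0);

          /* What the patched call site does instead: clear RAX,
           * the register that carries the bool return value. */
          unsigned long rax;
          asm("xor %%rax, %%rax" : "=a" (rax));

          printf("via CALL: %d, via patched XOR: %lu\n", via_call, rax);
          return 0;
  }

In the kernel itself the substitution happens at boot through the
DEF_NATIVE()/native_patch() hooks added below, so a native kernel never pays
for the indirect call.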

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: David.Laight@ACULAB.COM
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Pan Xinhui <xinhui.pan@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: benh@kernel.crashing.org
Cc: boqun.feng@gmail.com
Cc: borntraeger@de.ibm.com
Cc: bsingharora@gmail.com
Cc: dave@stgolabs.net
Cc: jgross@suse.com
Cc: kernellwp@gmail.com
Cc: konrad.wilk@oracle.com
Cc: mpe@ellerman.id.au
Cc: paulmck@linux.vnet.ibm.com
Cc: paulus@samba.org
Cc: pbonzini@redhat.com
Cc: rkrcmar@redhat.com
Cc: will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/paravirt_types.h
arch/x86/include/asm/qspinlock.h
arch/x86/include/asm/spinlock.h
arch/x86/kernel/kvm.c
arch/x86/kernel/paravirt-spinlocks.c
arch/x86/kernel/paravirt_patch_32.c
arch/x86/kernel/paravirt_patch_64.c
arch/x86/xen/spinlock.c

diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index ce932812f142a731cc4b9e082899b59b07b7a850..6108b1fada2bf93216cffe105605f26d576e065f 100644
@@ -678,6 +678,11 @@ static __always_inline void pv_kick(int cpu)
        PVOP_VCALL1(pv_lock_ops.kick, cpu);
 }
 
+static __always_inline bool pv_vcpu_is_preempted(int cpu)
+{
+       return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
+}
+
 #endif /* SMP && PARAVIRT_SPINLOCKS */
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 38c3bb74740f378fced83d0b0813149db6aba0fb..2614bd7a7839340a0d891683b32e1338662588d3 100644
@@ -311,7 +311,7 @@ struct pv_lock_ops {
        void (*wait)(u8 *ptr, u8 val);
        void (*kick)(int cpu);
 
-       bool (*vcpu_is_preempted)(int cpu);
+       struct paravirt_callee_save vcpu_is_preempted;
 };
 
 /* This contains all the paravirt structures: we get a convenient
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index eaba0807603009e6ed1612a7049bb35bafeaf31e..c343ab52579ff87faa26b5e363995076ebca2cb0 100644
@@ -32,6 +32,12 @@ static inline void queued_spin_unlock(struct qspinlock *lock)
 {
        pv_queued_spin_unlock(lock);
 }
+
+#define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(int cpu)
+{
+       return pv_vcpu_is_preempted(cpu);
+}
 #else
 static inline void queued_spin_unlock(struct qspinlock *lock)
 {
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 0526f596e399b493e63ab6df74e72c18ab82af71..921bea7a2708e2017f12184e09cde6203d6cf0e2 100644
 extern struct static_key paravirt_ticketlocks_enabled;
 static __always_inline bool static_key_false(struct static_key *key);
 
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
-#define vcpu_is_preempted vcpu_is_preempted
-static inline bool vcpu_is_preempted(int cpu)
-{
-       return pv_lock_ops.vcpu_is_preempted(cpu);
-}
-#endif
-
 #include <asm/qspinlock.h>
 
 /*
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 0b48dd2c35548e1deaf111e034ca6eaf4efb9b7d..52e90d6054fb5f9120067cef5b5a5e2e1b1540c0 100644
@@ -415,15 +415,6 @@ void kvm_disable_steal_time(void)
        wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
 }
 
-static bool kvm_vcpu_is_preempted(int cpu)
-{
-       struct kvm_steal_time *src;
-
-       src = &per_cpu(steal_time, cpu);
-
-       return !!src->preempted;
-}
-
 #ifdef CONFIG_SMP
 static void __init kvm_smp_prepare_boot_cpu(void)
 {
@@ -480,9 +471,6 @@ void __init kvm_guest_init(void)
        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                has_steal_clock = 1;
                pv_time_ops.steal_clock = kvm_steal_clock;
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
-               pv_lock_ops.vcpu_is_preempted = kvm_vcpu_is_preempted;
-#endif
        }
 
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
@@ -604,6 +592,14 @@ out:
        local_irq_restore(flags);
 }
 
+__visible bool __kvm_vcpu_is_preempted(int cpu)
+{
+       struct kvm_steal_time *src = &per_cpu(steal_time, cpu);
+
+       return !!src->preempted;
+}
+PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);
+
 /*
  * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
  */
@@ -620,6 +616,11 @@ void __init kvm_spinlock_init(void)
        pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
        pv_lock_ops.wait = kvm_wait;
        pv_lock_ops.kick = kvm_kick_cpu;
+
+       if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+               pv_lock_ops.vcpu_is_preempted =
+                       PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
+       }
 }
 
 static __init int kvm_spinlock_init_jump(void)
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 2f204dd552a40c45812df4dd28e2394a0d764a25..6d4bf812af45d9ed144ee33faf9f906ed9e107c6 100644
@@ -12,7 +12,6 @@ __visible void __native_queued_spin_unlock(struct qspinlock *lock)
 {
        native_queued_spin_unlock(lock);
 }
-
 PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
 
 bool pv_is_native_spin_unlock(void)
@@ -21,9 +20,16 @@ bool pv_is_native_spin_unlock(void)
                __raw_callee_save___native_queued_spin_unlock;
 }
 
-static bool native_vcpu_is_preempted(int cpu)
+__visible bool __native_vcpu_is_preempted(int cpu)
+{
+       return false;
+}
+PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);
+
+bool pv_is_native_vcpu_is_preempted(void)
 {
-       return 0;
+       return pv_lock_ops.vcpu_is_preempted.func ==
+               __raw_callee_save___native_vcpu_is_preempted;
 }
 
 struct pv_lock_ops pv_lock_ops = {
@@ -32,7 +38,7 @@ struct pv_lock_ops pv_lock_ops = {
        .queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
        .wait = paravirt_nop,
        .kick = paravirt_nop,
-       .vcpu_is_preempted = native_vcpu_is_preempted,
+       .vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
 #endif /* SMP */
 };
 EXPORT_SYMBOL(pv_lock_ops);
diff --git a/arch/x86/kernel/paravirt_patch_32.c b/arch/x86/kernel/paravirt_patch_32.c
index 920c6ae085923c5adda13d72593a4d566376035b..ff03dbd286251b254f9cf1d0d063ddc0489e8c4a 100644
@@ -12,6 +12,7 @@ DEF_NATIVE(pv_cpu_ops, clts, "clts");
 
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
 DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)");
+DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %eax, %eax");
 #endif
 
 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
@@ -27,6 +28,7 @@ unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
 }
 
 extern bool pv_is_native_spin_unlock(void);
+extern bool pv_is_native_vcpu_is_preempted(void);
 
 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                      unsigned long addr, unsigned len)
@@ -56,6 +58,12 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                                end   = end_pv_lock_ops_queued_spin_unlock;
                                goto patch_site;
                        }
+               case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
+                       if (pv_is_native_vcpu_is_preempted()) {
+                               start = start_pv_lock_ops_vcpu_is_preempted;
+                               end   = end_pv_lock_ops_vcpu_is_preempted;
+                               goto patch_site;
+                       }
 #endif
 
        default:
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c
index bb3840cedb4f00c0479b8e97844816a83091ed31..e61dd9791f4fd400dd61ebfd1195e29a0088963a 100644
@@ -21,6 +21,7 @@ DEF_NATIVE(, mov64, "mov %rdi, %rax");
 
 #if defined(CONFIG_PARAVIRT_SPINLOCKS)
 DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
+DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %rax, %rax");
 #endif
 
 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
@@ -36,6 +37,7 @@ unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
 }
 
 extern bool pv_is_native_spin_unlock(void);
+extern bool pv_is_native_vcpu_is_preempted(void);
 
 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                      unsigned long addr, unsigned len)
@@ -68,6 +70,12 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                                end   = end_pv_lock_ops_queued_spin_unlock;
                                goto patch_site;
                        }
+               case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
+                       if (pv_is_native_vcpu_is_preempted()) {
+                               start = start_pv_lock_ops_vcpu_is_preempted;
+                               end   = end_pv_lock_ops_vcpu_is_preempted;
+                               goto patch_site;
+                       }
 #endif
 
        default:
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 74756bbd28c4d2b51fa8c759a2966a3ca61526ce..e8a9ea7d7a211f5f98f9563de6dc13085185b852 100644
@@ -114,6 +114,8 @@ void xen_uninit_lock_cpu(int cpu)
        per_cpu(irq_name, cpu) = NULL;
 }
 
+PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);
+
 /*
  * Our init of PV spinlocks is split in two init functions due to us
  * using paravirt patching and jump labels patching and having to do
@@ -136,8 +138,7 @@ void __init xen_init_spinlocks(void)
        pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
        pv_lock_ops.wait = xen_qlock_wait;
        pv_lock_ops.kick = xen_qlock_kick;
-
-       pv_lock_ops.vcpu_is_preempted = xen_vcpu_stolen;
+       pv_lock_ops.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
 }
 
 /*