KVM: x86: clean up kvm_arch_vcpu_runnable
author Paolo Bonzini <pbonzini@redhat.com>
Tue, 13 Oct 2015 08:18:53 +0000 (10:18 +0200)
committer Paolo Bonzini <pbonzini@redhat.com>
Tue, 13 Oct 2015 16:28:59 +0000 (18:28 +0200)
Split the huge conditional into two functions.
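
The split keeps the logic identical: the mp_state/apf check becomes
kvm_vcpu_running() and the wakeup sources become kvm_vcpu_has_events().
As a rough standalone sketch of the resulting shape, using a simplified
stand-in vcpu structure and a stubbed event check rather than the real
kernel types, the pattern compiles as plain C:

	#include <stdbool.h>
	#include <stdio.h>

	/* Simplified stand-in for struct kvm_vcpu; fields are illustrative only. */
	struct vcpu_sketch {
		int  mp_state;		/* 0 == runnable, anything else == halted */
		bool apf_halted;	/* halted waiting on an async page fault */
		bool event_pending;	/* stand-in for the individual event checks */
	};

	/* Mirrors kvm_vcpu_running(): can the vcpu enter the guest right now? */
	static bool vcpu_running(const struct vcpu_sketch *v)
	{
		return v->mp_state == 0 && !v->apf_halted;
	}

	/* Mirrors kvm_vcpu_has_events(): is there pending work that should wake it? */
	static bool vcpu_has_events(const struct vcpu_sketch *v)
	{
		return v->event_pending;
	}

	/* Mirrors kvm_arch_vcpu_runnable() after the split. */
	static bool vcpu_runnable(const struct vcpu_sketch *v)
	{
		return vcpu_running(v) || vcpu_has_events(v);
	}

	int main(void)
	{
		struct vcpu_sketch halted_with_event = { 1, false, true };

		printf("runnable: %d\n", vcpu_runnable(&halted_with_event));
		return 0;
	}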

Fixes: 64d6067057d9658acb8675afcfba549abdb7fc16
Cc: stable@vger.kernel.org
Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/x86.c

index 3ac33f86c8737c7f32c85885626a0b532ea6cfe6..b69ef58e51eefce0b609bb9eb37a61ad9cbd07f8 100644
@@ -6453,6 +6453,12 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
        return 1;
 }
 
+static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
+{
+       return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
+               !vcpu->arch.apf.halted);
+}
+
 static int vcpu_run(struct kvm_vcpu *vcpu)
 {
        int r;
@@ -6461,8 +6467,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
        vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 
        for (;;) {
-               if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
-                   !vcpu->arch.apf.halted)
+               if (kvm_vcpu_running(vcpu))
                        r = vcpu_enter_guest(vcpu);
                else
                        r = vcpu_block(kvm, vcpu);
@@ -7762,19 +7767,33 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
        kvm_mmu_invalidate_zap_all_pages(kvm);
 }
 
+static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
+{
+       if (!list_empty_careful(&vcpu->async_pf.done))
+               return true;
+
+       if (kvm_apic_has_events(vcpu))
+               return true;
+
+       if (vcpu->arch.pv.pv_unhalted)
+               return true;
+
+       if (atomic_read(&vcpu->arch.nmi_queued))
+               return true;
+
+       if (kvm_arch_interrupt_allowed(vcpu) &&
+           kvm_cpu_has_interrupt(vcpu))
+               return true;
+
+       return false;
+}
+
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
        if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
                kvm_x86_ops->check_nested_events(vcpu, false);
 
-       return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
-               !vcpu->arch.apf.halted)
-               || !list_empty_careful(&vcpu->async_pf.done)
-               || kvm_apic_has_events(vcpu)
-               || vcpu->arch.pv.pv_unhalted
-               || atomic_read(&vcpu->arch.nmi_queued) ||
-               (kvm_arch_interrupt_allowed(vcpu) &&
-                kvm_cpu_has_interrupt(vcpu));
+       return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
 }
 
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)