KVM: x86: Rework request for immediate exit
Author: Jan Kiszka <jan.kiszka@siemens.com>
Sun, 28 Apr 2013 08:50:52 +0000 (10:50 +0200)
Committer: Gleb Natapov <gleb@redhat.com>
Sun, 28 Apr 2013 09:44:18 +0000 (12:44 +0300)
The VMX implementation of enable_irq_window raised
KVM_REQ_IMMEDIATE_EXIT after we checked it in vcpu_enter_guest. This
caused infinite loops on vmentry. Fix it by letting enable_irq_window
signal the need for an immediate exit via its return value and drop
KVM_REQ_IMMEDIATE_EXIT.

This issue only affects nested VMX scenarios.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
include/linux/kvm_host.h

index 18635ae42a8e2eaa1e265c2efc74bb5a2c651348..111b4a0c39076cb1239b5e5679f0fd61040ba2cd 100644 (file)
@@ -694,7 +694,7 @@ struct kvm_x86_ops {
        bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
        void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
        void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
-       void (*enable_irq_window)(struct kvm_vcpu *vcpu);
+       int (*enable_irq_window)(struct kvm_vcpu *vcpu);
        void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
        int (*vm_has_apicv)(struct kvm *kvm);
        void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
index 15c9cccd716bace921bfd8c38309d1132d4f6f54..7f896cbe717f933ffd5f1d8be8470442823421fc 100644 (file)
@@ -3632,7 +3632,7 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
        return ret;
 }
 
-static void enable_irq_window(struct kvm_vcpu *vcpu)
+static int enable_irq_window(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
@@ -3646,6 +3646,7 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
                svm_set_vintr(svm);
                svm_inject_irq(svm, 0x0);
        }
+       return 0;
 }
 
 static void enable_nmi_window(struct kvm_vcpu *vcpu)
index 0f0cb3110626a4bb307233262a51c65dc4ca6a18..74c525e2c6085716202a5b1c3625b96b490baf31 100644 (file)
@@ -4398,22 +4398,23 @@ static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
                PIN_BASED_NMI_EXITING;
 }
 
-static void enable_irq_window(struct kvm_vcpu *vcpu)
+static int enable_irq_window(struct kvm_vcpu *vcpu)
 {
        u32 cpu_based_vm_exec_control;
-       if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
+
+       if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
                /*
                 * We get here if vmx_interrupt_allowed() said we can't
-                * inject to L1 now because L2 must run. Ask L2 to exit
-                * right after entry, so we can inject to L1 more promptly.
+                * inject to L1 now because L2 must run. The caller will have
+                * to make L2 exit right after entry, so we can inject to L1
+                * more promptly.
                 */
-               kvm_make_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
-               return;
-       }
+               return -EBUSY;
 
        cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
        cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
        vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
+       return 0;
 }
 
 static void enable_nmi_window(struct kvm_vcpu *vcpu)
index 2a434bf3918d684d877689fd7978d4c6e14f16ea..c522260b5bbfa7b796a6ca1ebe0dbbbd16bf34ae 100644 (file)
@@ -5692,7 +5692,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        int r;
        bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
                vcpu->run->request_interrupt_window;
-       bool req_immediate_exit = 0;
+       bool req_immediate_exit = false;
 
        if (vcpu->requests) {
                if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
@@ -5734,8 +5734,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        record_steal_time(vcpu);
                if (kvm_check_request(KVM_REQ_NMI, vcpu))
                        process_nmi(vcpu);
-               req_immediate_exit =
-                       kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
                if (kvm_check_request(KVM_REQ_PMU, vcpu))
                        kvm_handle_pmu_event(vcpu);
                if (kvm_check_request(KVM_REQ_PMI, vcpu))
@@ -5757,7 +5755,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                if (vcpu->arch.nmi_pending)
                        kvm_x86_ops->enable_nmi_window(vcpu);
                else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
-                       kvm_x86_ops->enable_irq_window(vcpu);
+                       req_immediate_exit =
+                               kvm_x86_ops->enable_irq_window(vcpu) != 0;
 
                if (kvm_lapic_enabled(vcpu)) {
                        /*
index 93a50054d46c6f4e94fcfbf6f8a2f1b02fab4ec8..7bde42470e3723c652bf76b6f707ceaa1465edc8 100644 (file)
@@ -119,14 +119,13 @@ static inline bool is_error_page(struct page *page)
 #define KVM_REQ_APF_HALT          12
 #define KVM_REQ_STEAL_UPDATE      13
 #define KVM_REQ_NMI               14
-#define KVM_REQ_IMMEDIATE_EXIT    15
-#define KVM_REQ_PMU               16
-#define KVM_REQ_PMI               17
-#define KVM_REQ_WATCHDOG          18
-#define KVM_REQ_MASTERCLOCK_UPDATE 19
-#define KVM_REQ_MCLOCK_INPROGRESS 20
-#define KVM_REQ_EPR_EXIT          21
-#define KVM_REQ_SCAN_IOAPIC       22
+#define KVM_REQ_PMU               15
+#define KVM_REQ_PMI               16
+#define KVM_REQ_WATCHDOG          17
+#define KVM_REQ_MASTERCLOCK_UPDATE 18
+#define KVM_REQ_MCLOCK_INPROGRESS 19
+#define KVM_REQ_EPR_EXIT          20
+#define KVM_REQ_SCAN_IOAPIC       21
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID            0
 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID       1