KVM: VMX: Add the deliver posted interrupt algorithm
authorYang Zhang <yang.z.zhang@Intel.com>
Thu, 11 Apr 2013 11:25:15 +0000 (19:25 +0800)
committerMarcelo Tosatti <mtosatti@redhat.com>
Tue, 16 Apr 2013 19:32:40 +0000 (16:32 -0300)
Only deliver the posted interrupt when the target vcpu is running
and there is no previous interrupt pending in the PIR.

Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
Reviewed-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/lapic.c
arch/x86/kvm/lapic.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
virt/kvm/kvm_main.c

index 68d438630dd3fa74288820a824fef30b88078528..599f98b612d400870c7e3488f2bd339166f6ef2b 100644 (file)
@@ -701,6 +701,8 @@ struct kvm_x86_ops {
        void (*hwapic_isr_update)(struct kvm *kvm, int isr);
        void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
        void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
+       void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
+       void (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
        int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
        int (*get_tdp_level)(void);
        u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
index d197579435d00bf8e78703d4e454e05f8b4b1cbe..dbf74c922aa13510d00e80deecaeb07a00e7965b 100644 (file)
@@ -318,6 +318,19 @@ static u8 count_vectors(void *bitmap)
        return count;
 }
 
+/*
+ * kvm_apic_update_irr - move pending vectors from a PIR into the vAPIC IRR
+ * @vcpu: target vcpu whose local APIC receives the vectors
+ * @pir:  posted-interrupt request bitmap (8 x u32 = 256 vectors)
+ *
+ * Each u32 word of the PIR is fetched-and-cleared atomically with xchg(),
+ * so bits set concurrently by a sender are never lost.  Non-zero words are
+ * OR-ed into the matching IRR register; IRR registers are spaced 0x10
+ * apart in the APIC register page.
+ */
+void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
+{
+       u32 i, pir_val;
+       struct kvm_lapic *apic = vcpu->arch.apic;
+
+       for (i = 0; i <= 7; i++) {
+               /* Atomically claim this word of the PIR. */
+               pir_val = xchg(&pir[i], 0);
+               if (pir_val)
+                       *((u32 *)(apic->regs + APIC_IRR + i * 0x10)) |= pir_val;
+       }
+}
+EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
+
 static inline int apic_test_and_set_irr(int vec, struct kvm_lapic *apic)
 {
        apic->irr_pending = true;
index 7fe0c9180ea1f962947e72e23b5f003728a64d40..c730ac9fe80188d15957bf9b556918036db892ab 100644 (file)
@@ -54,6 +54,7 @@ u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu);
 void kvm_apic_set_version(struct kvm_vcpu *vcpu);
 
 void kvm_apic_update_tmr(struct kvm_vcpu *vcpu, u32 *tmr);
+void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir);
 int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
 int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
index 2f8fe3f06837a3145de35fde3a790272158904fd..d6713e18bbc1a168dac938f72babf56521099d89 100644 (file)
@@ -3577,6 +3577,11 @@ static void svm_hwapic_isr_update(struct kvm *kvm, int isr)
        return;
 }
 
+/*
+ * SVM does not implement the posted-interrupt PIR mechanism added by this
+ * patch, so there is never anything to sync into the IRR; this callback is
+ * an intentional no-op so the common code can call it unconditionally.
+ */
+static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
+{
+       return;
+}
+
 static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -4305,6 +4310,7 @@ static struct kvm_x86_ops svm_x86_ops = {
        .vm_has_apicv = svm_vm_has_apicv,
        .load_eoi_exitmap = svm_load_eoi_exitmap,
        .hwapic_isr_update = svm_hwapic_isr_update,
+       .sync_pir_to_irr = svm_sync_pir_to_irr,
 
        .set_tss_addr = svm_set_tss_addr,
        .get_tdp_level = get_npt_level,
index 8535519d0352a6cf4a0179050709c24c9b80ea08..3a14d8a0ee462853d86448fd824d7cc56de2fa7a 100644 (file)
@@ -375,6 +375,23 @@ struct pi_desc {
        u32 rsvd[7];
 } __aligned(64);
 
+/*
+ * Atomically set the ON (outstanding notification) bit in the posted-
+ * interrupt descriptor; returns the previous value, so a non-zero result
+ * means a notification was already pending.
+ */
+static bool pi_test_and_set_on(struct pi_desc *pi_desc)
+{
+       return test_and_set_bit(POSTED_INTR_ON,
+                       (unsigned long *)&pi_desc->control);
+}
+
+/*
+ * Atomically clear ON; returns the previous value, i.e. whether a
+ * notification was outstanding before the clear.
+ */
+static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
+{
+       return test_and_clear_bit(POSTED_INTR_ON,
+                       (unsigned long *)&pi_desc->control);
+}
+
+/*
+ * Atomically set @vector in the 256-bit PIR bitmap; returns non-zero if
+ * the vector was already pending.
+ */
+static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
+{
+       return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
+}
+
 struct vcpu_vmx {
        struct kvm_vcpu       vcpu;
        unsigned long         host_rsp;
@@ -639,6 +656,7 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
 static bool guest_state_valid(struct kvm_vcpu *vcpu);
 static u32 vmx_segment_access_rights(struct kvm_segment *var);
+static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -2846,8 +2864,11 @@ static __init int hardware_setup(void)
 
        if (enable_apicv)
                kvm_x86_ops->update_cr8_intercept = NULL;
-       else
+       else {
                kvm_x86_ops->hwapic_irr_update = NULL;
+               kvm_x86_ops->deliver_posted_interrupt = NULL;
+               kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
+       }
 
        if (nested)
                nested_vmx_setup_ctls_msrs();
@@ -3908,6 +3929,45 @@ static int vmx_vm_has_apicv(struct kvm *kvm)
        return enable_apicv && irqchip_in_kernel(kvm);
 }
 
+/*
+ * Send an interrupt to a vcpu via the posted-interrupt mechanism.
+ * 1. If the target vcpu is running (non-root mode), send a posted-interrupt
+ *    notification IPI and the hardware will sync PIR to vIRR atomically.
+ * 2. If the target vcpu is not running (root mode), kick it so it picks up
+ *    the interrupt from the PIR on the next vmentry.
+ */
+static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       int r;
+
+       /* Vector already pending in the PIR: the earlier sender owns the
+        * notification, nothing more to do. */
+       if (pi_test_and_set_pir(vector, &vmx->pi_desc))
+               return;
+
+       /* r != 0 means ON was already set, i.e. a notification is in flight. */
+       r = pi_test_and_set_on(&vmx->pi_desc);
+       kvm_make_request(KVM_REQ_EVENT, vcpu);
+       /*
+        * NOTE(review): vcpu->mode is read without synchronization here; if
+        * the vcpu leaves guest mode just after this check the IPI may be
+        * wasted, but the KVM_REQ_EVENT set above should still get the
+        * interrupt noticed on the next vmentry -- confirm.
+        */
+       if (!r && (vcpu->mode == IN_GUEST_MODE))
+               apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
+                               POSTED_INTR_VECTOR);
+       else
+               kvm_vcpu_kick(vcpu);
+}
+
+/*
+ * Fold any posted interrupts recorded in the PIR into the vAPIC IRR.
+ * If ON is not set there is no outstanding notification, so the per-word
+ * xchg() walk in kvm_apic_update_irr() is skipped entirely.
+ */
+static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+       if (!pi_test_and_clear_on(&vmx->pi_desc))
+               return;
+
+       kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
+}
+
+/*
+ * No-op sync_pir_to_irr callback installed by hardware_setup() when apicv
+ * is disabled and posted interrupts are therefore never used.
+ */
+static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu)
+{
+       return;
+}
+
 /*
  * Set up the vmcs's constant host-state fields, i.e., host-state fields that
  * will not change in the lifetime of the guest.
@@ -7784,6 +7844,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .load_eoi_exitmap = vmx_load_eoi_exitmap,
        .hwapic_irr_update = vmx_hwapic_irr_update,
        .hwapic_isr_update = vmx_hwapic_isr_update,
+       .sync_pir_to_irr = vmx_sync_pir_to_irr,
+       .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
 
        .set_tss_addr = vmx_set_tss_addr,
        .get_tdp_level = get_ept_level,
index 65a9e0716a8d4b9333e5f950c3558b1bded6f27f..aaac1a7a9ea853bc4990feaf60c1bbf17815260b 100644 (file)
@@ -1671,6 +1671,7 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
                        smp_send_reschedule(cpu);
        put_cpu();
 }
+EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
 #endif /* !CONFIG_S390 */
 
 void kvm_resched(struct kvm_vcpu *vcpu)