KVM: x86: preparatory changes for APICv cleanups
author Paolo Bonzini <pbonzini@redhat.com>
Mon, 19 Dec 2016 12:05:46 +0000 (13:05 +0100)
committer Paolo Bonzini <pbonzini@redhat.com>
Wed, 15 Feb 2017 13:54:34 +0000 (14:54 +0100)
Add return value to __kvm_apic_update_irr/kvm_apic_update_irr.
Move vmx_sync_pir_to_irr around.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/lapic.c
arch/x86/kvm/lapic.h
arch/x86/kvm/vmx.c
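
Before the diffs, a rough illustration of the first change: __kvm_apic_update_irr()
now reports the highest vector pending in the IRR after folding in the
posted-interrupt request (PIR) bits, or -1 when nothing is pending. The sketch
below is a self-contained userspace model of just those semantics, not kernel
code; update_irr_model(), highest_bit() and the plain-array PIR/IRR layout are
invented for the example, and the GCC/Clang builtin __builtin_clz stands in for
the kernel's __fls().

#include <stdint.h>
#include <stdio.h>

#define APIC_VECTORS_PER_REG 32

/* Stand-in for the kernel's __fls(): index of the highest set bit, x != 0. */
static int highest_bit(uint32_t x)
{
	return 31 - __builtin_clz(x);
}

/* Merge posted bits (pir) into the IRR and return the highest pending vector. */
static int update_irr_model(uint32_t pir[8], uint32_t irr[8])
{
	int max_irr = -1;

	for (int i = 0, vec = 0; i <= 7; i++, vec += APIC_VECTORS_PER_REG) {
		uint32_t pir_val = pir[i];
		uint32_t irr_val = irr[i];

		if (pir_val) {
			/* The kernel takes the bits atomically with xchg(&pir[i], 0). */
			irr_val |= pir_val;
			pir[i] = 0;
			irr[i] = irr_val;
		}
		if (irr_val)
			max_irr = highest_bit(irr_val) + vec;
	}
	return max_irr;
}

int main(void)
{
	uint32_t pir[8] = { 0 }, irr[8] = { 0 };

	pir[1] = 1u << 3;	/* freshly posted vector 35 */
	irr[0] = 1u << 9;	/* vector 9 already pending in the IRR */
	printf("max_irr = %d\n", update_irr_model(pir, irr));	/* prints 35 */
	return 0;
}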

diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 8ddd0ed0388088c6b80abfbb050ed3e0f26e4da7..120afc2bcfd303d52951784898b4479ccd5e985c 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -341,7 +341,7 @@ static int find_highest_vector(void *bitmap)
             vec >= 0; vec -= APIC_VECTORS_PER_REG) {
                reg = bitmap + REG_POS(vec);
                if (*reg)
-                       return fls(*reg) - 1 + vec;
+                       return __fls(*reg) + vec;
        }
 
        return -1;
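
The hunk above is a pure simplification: the kernel's fls() returns the 1-based
position of the most significant set bit, while __fls() returns the 0-based
position (and is only defined for a nonzero argument), so fls(*reg) - 1 equals
__fls(*reg) in the *reg != 0 case that find_highest_vector() already guards.
A small userspace check of that equivalence, with model_fls()/model___fls() as
hypothetical stand-ins built on the GCC/Clang __builtin_clz builtin:

#include <assert.h>
#include <stdint.h>

/* Models of the kernel helpers the hunk swaps between. */
static int model_fls(uint32_t x)	/* like fls(): 1-based, 0 for x == 0 */
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static int model___fls(uint32_t x)	/* like __fls(): 0-based, x must be nonzero */
{
	return 31 - __builtin_clz(x);
}

int main(void)
{
	/* find_highest_vector() only reaches the helper when *reg is nonzero. */
	for (uint32_t reg = 1; reg; reg <<= 1)
		assert(model_fls(reg) - 1 == model___fls(reg));
	return 0;
}
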
@@ -361,27 +361,36 @@ static u8 count_vectors(void *bitmap)
        return count;
 }
 
-void __kvm_apic_update_irr(u32 *pir, void *regs)
+int __kvm_apic_update_irr(u32 *pir, void *regs)
 {
-       u32 i, pir_val;
+       u32 i, vec;
+       u32 pir_val, irr_val;
+       int max_irr = -1;
 
-       for (i = 0; i <= 7; i++) {
+       for (i = vec = 0; i <= 7; i++, vec += 32) {
                pir_val = READ_ONCE(pir[i]);
+               irr_val = *((u32 *)(regs + APIC_IRR + i * 0x10));
                if (pir_val) {
-                       pir_val = xchg(&pir[i], 0);
-                       *((u32 *)(regs + APIC_IRR + i * 0x10)) |= pir_val;
+                       irr_val |= xchg(&pir[i], 0);
+                       *((u32 *)(regs + APIC_IRR + i * 0x10)) = irr_val;
                }
+               if (irr_val)
+                       max_irr = __fls(irr_val) + vec;
        }
+
+       return max_irr;
 }
 EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
 
-void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
+int kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
+       int max_irr;
 
-       __kvm_apic_update_irr(pir, apic->regs);
+       max_irr = __kvm_apic_update_irr(pir, apic->regs);
 
        kvm_make_request(KVM_REQ_EVENT, vcpu);
+       return max_irr;
 }
 EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
 
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 05abd837b78a385f37c758f3e2ca6008f91de473..bcbe811f3b97f1d8b576a24e7fefca0b4fe477dd 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -71,8 +71,8 @@ int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
 bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
                           int short_hand, unsigned int dest, int dest_mode);
 
-void __kvm_apic_update_irr(u32 *pir, void *regs);
-void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir);
+int __kvm_apic_update_irr(u32 *pir, void *regs);
+int kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir);
 void kvm_apic_update_ppr(struct kvm_vcpu *vcpu);
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
                     struct dest_map *dest_map);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8d2e0cc8e83e974261d78c4fa741fabaee9a94ce..4ac9b484e2443f5869b254b56cabc32f26f2f2da 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5057,22 +5057,6 @@ static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
                kvm_vcpu_kick(vcpu);
 }
 
-static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
-{
-       struct vcpu_vmx *vmx = to_vmx(vcpu);
-
-       if (!pi_test_on(&vmx->pi_desc))
-               return;
-
-       pi_clear_on(&vmx->pi_desc);
-       /*
-        * IOMMU can write to PIR.ON, so the barrier matters even on UP.
-        * But on x86 this is just a compiler barrier anyway.
-        */
-       smp_mb__after_atomic();
-       kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
-}
-
 /*
  * Set up the vmcs's constant host-state fields, i.e., host-state fields that
  * will not change in the lifetime of the guest.
@@ -8738,6 +8722,22 @@ static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
        }
 }
 
+static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+       if (!pi_test_on(&vmx->pi_desc))
+               return;
+
+       pi_clear_on(&vmx->pi_desc);
+       /*
+        * IOMMU can write to PIR.ON, so the barrier matters even on UP.
+        * But on x86 this is just a compiler barrier anyway.
+        */
+       smp_mb__after_atomic();
+       kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
+}
+
 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
 {
        if (!kvm_vcpu_apicv_active(vcpu))
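
For context on the block just moved: vmx_sync_pir_to_irr() drains the
posted-interrupt descriptor, and the pattern it follows is test the
outstanding-notification (ON) bit, clear it, order that clear against the
subsequent PIR reads (the job of smp_mb__after_atomic() here, since the IOMMU
may post new bits and re-set ON concurrently), then fold PIR into the IRR.
A loose C11 userspace model of that pattern; struct pi_desc_model and
sync_pir_model() are names made up for this sketch, and kernel-specific
details are deliberately ignored:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct pi_desc_model {
	_Atomic uint32_t pir[8];	/* posted-interrupt requests, one bit per vector */
	atomic_bool on;			/* outstanding-notification bit */
};

static void sync_pir_model(struct pi_desc_model *pi, uint32_t irr[8])
{
	if (!atomic_load(&pi->on))
		return;

	atomic_store(&pi->on, false);
	/*
	 * Full fence standing in for smp_mb__after_atomic(): the PIR reads
	 * below must not be reordered before the ON clear, because another
	 * agent (the IOMMU, per the kernel comment) may post new bits and
	 * re-set ON concurrently.
	 */
	atomic_thread_fence(memory_order_seq_cst);

	for (int i = 0; i <= 7; i++)
		irr[i] |= atomic_exchange(&pi->pir[i], 0);
}

int main(void)
{
	struct pi_desc_model pi = { .on = true };
	uint32_t irr[8] = { 0 };

	atomic_store(&pi.pir[2], 1u << 5);	/* post vector 69 */
	sync_pir_model(&pi, irr);
	return irr[2] == 1u << 5 ? 0 : 1;
}

Relocating the function below vmx_hwapic_irr_update() keeps it next to the
code that can consume the max_irr value kvm_apic_update_irr() now returns,
which presumably sets up the APICv cleanups announced in the subject line.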