Get rid of ioapic_inj_irq() and ioapic_inj_nmi() functions.

Instead of going through ioapic-private injection helpers, pass the
delivery mode down to kvm_apic_set_irq() and let the local APIC code
translate it into the corresponding APIC delivery mode, so fixed,
lowest-priority and NMI delivery all share one path.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
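
As background, the net effect of the lapic-side change can be modelled with a
small stand-alone sketch. This is not kernel code: the IOAPIC_*/APIC_DM_*
values and the map_delivery_mode() helper below are illustrative stand-ins for
the mapping that the patch adds inline to kvm_apic_set_irq() before it calls
__apic_accept_irq() with level set to 1.

#include <stdio.h>

/* Stand-ins for the ioapic redirection-table delivery modes (assumed values). */
enum { IOAPIC_FIXED = 0x0, IOAPIC_LOWEST_PRIORITY = 0x1, IOAPIC_NMI = 0x4 };
/* Stand-ins for the lapic delivery modes consumed by __apic_accept_irq(). */
enum { APIC_DM_FIXED = 0x000, APIC_DM_LOWEST = 0x100, APIC_DM_NMI = 0x400 };

/* Translate an ioapic delivery mode into a lapic delivery mode, or return -1
 * for modes the patch ignores (it prints a debug message and returns 0). */
static int map_delivery_mode(int ioapic_dmode)
{
	switch (ioapic_dmode) {
	case IOAPIC_LOWEST_PRIORITY:
		return APIC_DM_LOWEST;
	case IOAPIC_FIXED:
		return APIC_DM_FIXED;
	case IOAPIC_NMI:
		return APIC_DM_NMI;
	default:
		return -1;
	}
}

int main(void)
{
	int modes[] = { IOAPIC_FIXED, IOAPIC_LOWEST_PRIORITY, IOAPIC_NMI, 0x7 };
	unsigned int i;

	for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++) {
		int d = map_delivery_mode(modes[i]);
		if (d < 0)
			printf("ioapic dmode %#x -> ignored\n", modes[i]);
		else
			printf("ioapic dmode %#x -> lapic dmode %#x\n",
			       modes[i], d);
	}
	return 0;
}

In the patch itself this mapping lives directly in kvm_apic_set_irq(), which
then returns whatever __apic_accept_irq() reports, so ioapic_deliver() can
accumulate the number of vcpus that accepted the interrupt.
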
int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
void kvm_sal_emul(struct kvm_vcpu *vcpu);
-static inline void kvm_inject_nmi(struct kvm_vcpu *vcpu) {}
#endif /* __ASSEMBLY__*/
#endif
{
switch (dm) {
case SAPIC_FIXED:
- kvm_apic_set_irq(vcpu, vector, 0);
+ kvm_apic_set_irq(vcpu, vector, dm, 0);
break;
case SAPIC_NMI:
- kvm_apic_set_irq(vcpu, 2, 0);
+ kvm_apic_set_irq(vcpu, 2, dm, 0);
break;
case SAPIC_EXTINT:
- kvm_apic_set_irq(vcpu, 0, 0);
+ kvm_apic_set_irq(vcpu, 0, dm, 0);
break;
case SAPIC_INIT:
case SAPIC_PMI:
put_cpu();
}
-int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 dmode, u8 trig)
{
struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
-int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig);
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 dmode, u8 trig);
#endif
}
EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);
-int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
+static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
+ int vector, int level, int trig_mode);
+
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 dmode, u8 trig)
{
struct kvm_lapic *apic = vcpu->arch.apic;
+ int lapic_dmode;
- if (!apic_test_and_set_irr(vec, apic)) {
- /* a new pending irq is set in IRR */
- if (trig)
- apic_set_vector(vec, apic->regs + APIC_TMR);
- else
- apic_clear_vector(vec, apic->regs + APIC_TMR);
- kvm_vcpu_kick(apic->vcpu);
- return 1;
+ switch (dmode) {
+ case IOAPIC_LOWEST_PRIORITY:
+ lapic_dmode = APIC_DM_LOWEST;
+ break;
+ case IOAPIC_FIXED:
+ lapic_dmode = APIC_DM_FIXED;
+ break;
+ case IOAPIC_NMI:
+ lapic_dmode = APIC_DM_NMI;
+ break;
+ default:
+ printk(KERN_DEBUG "Ignoring delivery mode %d\n", dmode);
+ return 0;
}
- return 0;
+ return __apic_accept_irq(apic, lapic_dmode, vec, 1, trig);
}
static inline int apic_find_highest_isr(struct kvm_lapic *apic)
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
int vector, int level, int trig_mode)
{
- int orig_irr, result = 0;
+ int result = 0;
struct kvm_vcpu *vcpu = apic->vcpu;
switch (delivery_mode) {
if (unlikely(!apic_enabled(apic)))
break;
- orig_irr = apic_test_and_set_irr(vector, apic);
- if (orig_irr && trig_mode) {
- apic_debug("level trig mode repeatedly for vector %d",
- vector);
+ result = !apic_test_and_set_irr(vector, apic);
+ if (!result) {
+ if (trig_mode)
+ apic_debug("level trig mode repeatedly for "
+ "vector %d", vector);
break;
}
apic_set_vector(vector, apic->regs + APIC_TMR);
} else
apic_clear_vector(vector, apic->regs + APIC_TMR);
-
kvm_vcpu_kick(vcpu);
-
- result = (orig_irr == 0);
break;
case APIC_DM_REMRD:
break;
case APIC_DM_NMI:
+ result = 1;
kvm_inject_nmi(vcpu);
kvm_vcpu_kick(vcpu);
break;
case APIC_DM_INIT:
if (level) {
+ result = 1;
if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
printk(KERN_DEBUG
"INIT on a runnable vcpu %d\n",
apic_debug("SIPI to vcpu %d vector 0x%02x\n",
vcpu->vcpu_id, vector);
if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
+ result = 1;
vcpu->arch.sipi_vector = vector;
vcpu->arch.mp_state = KVM_MP_STATE_SIPI_RECEIVED;
kvm_vcpu_kick(vcpu);
int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
-int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig);
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 dmode, u8 trig);
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data);
}
}
-static int ioapic_inj_irq(struct kvm_ioapic *ioapic,
- struct kvm_vcpu *vcpu,
- u8 vector, u8 trig_mode, u8 delivery_mode)
-{
- ioapic_debug("irq %d trig %d deliv %d\n", vector, trig_mode,
- delivery_mode);
-
- ASSERT((delivery_mode == IOAPIC_FIXED) ||
- (delivery_mode == IOAPIC_LOWEST_PRIORITY));
-
- return kvm_apic_set_irq(vcpu, vector, trig_mode);
-}
-
-static void ioapic_inj_nmi(struct kvm_vcpu *vcpu)
-{
- kvm_inject_nmi(vcpu);
- kvm_vcpu_kick(vcpu);
-}
-
static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
{
union kvm_ioapic_redirect_entry entry = ioapic->redirtbl[irq];
__clear_bit(vcpu_id, deliver_bitmask);
vcpu = ioapic->kvm->vcpus[vcpu_id];
if (vcpu) {
- if (entry.fields.delivery_mode ==
- IOAPIC_LOWEST_PRIORITY ||
- entry.fields.delivery_mode == IOAPIC_FIXED) {
- if (r < 0)
- r = 0;
- r += ioapic_inj_irq(ioapic, vcpu,
- entry.fields.vector,
- entry.fields.trig_mode,
- entry.fields.delivery_mode);
- } else if (entry.fields.delivery_mode == IOAPIC_NMI) {
- r = 1;
- ioapic_inj_nmi(vcpu);
- } else
- ioapic_debug("unsupported delivery mode %x!\n",
- entry.fields.delivery_mode);
+ if (r < 0)
+ r = 0;
+ r += kvm_apic_set_irq(vcpu,
+ entry.fields.vector,
+ entry.fields.delivery_mode,
+ entry.fields.trig_mode);
} else
ioapic_debug("null destination vcpu: "
"mask=%x vector=%x delivery_mode=%x\n",
if (r < 0)
r = 0;
r += kvm_apic_set_irq(vcpu, entry.fields.vector,
+ entry.fields.delivery_mode,
entry.fields.trig_mode);
}
}