At the vector level, kernel and userspace irqchip are fairly similar, so unify the interrupt-pending and interrupt-vector queries behind kvm_cpu_has_interrupt() and kvm_cpu_get_interrupt(), which dispatch on irqchip_in_kernel() internally.
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
#include "irq.h"
#include "i8254.h"
+#include "x86.h"
/*
* check if there are pending timer events
{
struct kvm_pic *s;
+ if (!irqchip_in_kernel(v->kvm))
+ return v->arch.irq_summary;
+
if (kvm_apic_has_interrupt(v) == -1) { /* LAPIC */
if (kvm_apic_accept_pic_intr(v)) {
s = pic_irqchip(v->kvm); /* PIC */
struct kvm_pic *s;
int vector;
+ if (!irqchip_in_kernel(v->kvm))
+ return kvm_pop_irq(v);
+
vector = kvm_get_apic_interrupt(v); /* APIC */
if (vector == -1) {
if (kvm_apic_accept_pic_intr(v)) {
* If the user space waits to inject interrupts, exit as soon as
* possible
*/
- if (kvm_run->request_interrupt_window &&
- !svm->vcpu.arch.irq_summary) {
+ if (!irqchip_in_kernel(svm->vcpu.kvm) &&
+ kvm_run->request_interrupt_window &&
+ !kvm_cpu_has_interrupt(&svm->vcpu)) {
++svm->vcpu.stat.irq_window_exits;
kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
return 0;
(svm->vmcb->save.rflags & X86_EFLAGS_IF) &&
(svm->vcpu.arch.hflags & HF_GIF_MASK));
- if (svm->vcpu.arch.interrupt_window_open && svm->vcpu.arch.irq_summary)
+ if (svm->vcpu.arch.interrupt_window_open &&
+ kvm_cpu_has_interrupt(&svm->vcpu))
/*
* If interrupts enabled, and not blocked by sti or mov ss. Good.
*/
* Interrupts blocked. Wait for unblock.
*/
if (!svm->vcpu.arch.interrupt_window_open &&
- (svm->vcpu.arch.irq_summary || kvm_run->request_interrupt_window))
+ (kvm_cpu_has_interrupt(&svm->vcpu) ||
+ kvm_run->request_interrupt_window))
svm_set_vintr(svm);
else
svm_clear_vintr(svm);
vmx_inject_nmi(vcpu);
if (vcpu->arch.nmi_pending)
enable_nmi_window(vcpu);
- else if (vcpu->arch.irq_summary
- || kvm_run->request_interrupt_window)
+ else if (kvm_cpu_has_interrupt(vcpu) ||
+ kvm_run->request_interrupt_window)
enable_irq_window(vcpu);
return;
}
if (vcpu->arch.interrupt_window_open) {
- if (vcpu->arch.irq_summary && !vcpu->arch.interrupt.pending)
- kvm_queue_interrupt(vcpu, kvm_pop_irq(vcpu));
+ if (kvm_cpu_has_interrupt(vcpu) && !vcpu->arch.interrupt.pending)
+ kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
if (vcpu->arch.interrupt.pending)
vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
- }
- if (!vcpu->arch.interrupt_window_open &&
- (vcpu->arch.irq_summary || kvm_run->request_interrupt_window))
+	} else if (kvm_cpu_has_interrupt(vcpu) ||
+		   kvm_run->request_interrupt_window)
enable_irq_window(vcpu);
}
* If the user space waits to inject interrupts, exit as soon as
* possible
*/
- if (kvm_run->request_interrupt_window &&
- !vcpu->arch.irq_summary) {
+ if (!irqchip_in_kernel(vcpu->kvm) &&
+ kvm_run->request_interrupt_window &&
+ !kvm_cpu_has_interrupt(vcpu)) {
kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
return 0;
}
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
struct kvm_run *kvm_run)
{
- return (!vcpu->arch.irq_summary &&
+ return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
kvm_run->request_interrupt_window &&
vcpu->arch.interrupt_window_open &&
(kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
else
kvm_run->ready_for_interrupt_injection =
(vcpu->arch.interrupt_window_open &&
- vcpu->arch.irq_summary == 0);
+ !kvm_cpu_has_interrupt(vcpu));
}
static void vapic_enter(struct kvm_vcpu *vcpu)