--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ ... @@
return 0;
}
+int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+ /* do real check here */
+ return 1;
+}
+
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
return vcpu->arch.timer_fired;
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ ... @@
return !!(v->arch.pending_exceptions);
}
+int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+ /* do real check here */
+ return 1;
+}
+
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
return !(v->arch.msr & MSR_WE);
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ ... @@
return rc;
}
+int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+ /* do real check here */
+ return 1;
+}
+
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
return 0;
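kvm_arch_interrupt_allowed() is the per-architecture hook that the generic halt path at the end of this patch queries. ia64, powerpc and s390 do not implement a real check yet (hence the "do real check here" comments): their stubs conservatively return 1, which keeps the old behaviour of unhalting on any pending interrupt on those architectures.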
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ ... @@
void (*inject_pending_irq)(struct kvm_vcpu *vcpu);
void (*inject_pending_vectors)(struct kvm_vcpu *vcpu,
struct kvm_run *run);
-
+ int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
int (*get_tdp_level)(void);
int (*get_mt_mask_shift)(void);
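On x86 the check is vendor-specific, so it enters struct kvm_x86_ops as a new callback (slotting into the blank line removed above); svm.c and vmx.c each wire up an implementation below.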
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ ... @@
vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
}
+static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ struct vmcb *vmcb = svm->vmcb;
+ return (vmcb->save.rflags & X86_EFLAGS_IF) &&
+ !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
+ (svm->vcpu.arch.hflags & HF_GIF_MASK);
+}
+
static void svm_intr_assist(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
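The SVM implementation requires all three conditions for delivery: the guest's EFLAGS.IF must be set, the vcpu must not sit in an interrupt shadow (the instruction after STI or MOV SS), and the global interrupt flag must be set, the case that matters once nested SVM clears GIF via CLGI.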
@@ ... @@
.exception_injected = svm_exception_injected,
.inject_pending_irq = svm_intr_assist,
.inject_pending_vectors = do_interrupt_requests,
+ .interrupt_allowed = svm_interrupt_allowed,
.set_tss_addr = svm_set_tss_addr,
.get_tdp_level = get_npt_level,
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ ... @@
GUEST_INTR_STATE_MOV_SS)));
}
+static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+ vmx_update_window_states(vcpu);
+ return vcpu->arch.interrupt_window_open;
+}
+
static void do_interrupt_requests(struct kvm_vcpu *vcpu,
struct kvm_run *kvm_run)
{
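VMX already tracks deliverability in vcpu->arch.interrupt_window_open, so its hook just refreshes the cached window state via vmx_update_window_states() and returns it.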
@@ ... @@
.exception_injected = vmx_exception_injected,
.inject_pending_irq = vmx_intr_assist,
.inject_pending_vectors = do_interrupt_requests,
-
+ .interrupt_allowed = vmx_interrupt_allowed,
.set_tss_addr = vmx_set_tss_addr,
.get_tdp_level = get_ept_level,
.get_mt_mask_shift = vmx_get_mt_mask_shift,
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ ... @@
smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
put_cpu();
}
+
+int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
+{
+ return kvm_x86_ops->interrupt_allowed(vcpu);
+}
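The generic x86 kvm_arch_interrupt_allowed() simply dispatches through the ops table, so the arch-independent code in virt/kvm/kvm_main.c below stays vendor-neutral.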
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ ... @@
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
+int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
void kvm_free_physmem(struct kvm *kvm);
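Declaring the hook in the arch-independent header is what lets kvm_vcpu_block() call it in the final hunk.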
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ ... @@
for (;;) {
prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
- if (kvm_cpu_has_interrupt(vcpu) ||
+ if ((kvm_arch_interrupt_allowed(vcpu) &&
+ kvm_cpu_has_interrupt(vcpu)) ||
kvm_arch_vcpu_runnable(vcpu)) {
set_bit(KVM_REQ_UNHALT, &vcpu->requests);
break;
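This last hunk, in kvm_vcpu_block(), is the fix itself: before it, any pending interrupt woke a halted vcpu even when the guest could not accept it (interrupt window closed), so the vcpu would drop straight back into halt and spin through halt exits. After it, the wakeup predicate is equivalent to the following minimal C sketch; vcpu_should_wake() is a hypothetical helper for illustration, not part of the patch:

/*
 * Hypothetical helper (not in the patch): the condition under which
 * kvm_vcpu_block() now lets a halted vcpu run again.
 */
static bool vcpu_should_wake(struct kvm_vcpu *vcpu)
{
	/* A pending interrupt counts only if it can actually be injected. */
	return (kvm_arch_interrupt_allowed(vcpu) &&
		kvm_cpu_has_interrupt(vcpu)) ||
	       kvm_arch_vcpu_runnable(vcpu);
}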