void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
-void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu);
+void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu);
#endif
	start_apic_timer(apic);
}

-void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
+void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct hrtimer *timer;

	if (!apic)
		return;

	timer = &apic->timer.dev;
	if (hrtimer_cancel(timer))
		hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS);
}
-EXPORT_SYMBOL_GPL(kvm_migrate_apic_timer);
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
	if (unlikely(r))
		goto out;

-	if (vcpu->requests)
+	if (vcpu->requests) {
+		if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
+			__kvm_migrate_apic_timer(vcpu);
		if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
				       &vcpu->requests)) {
			kvm_run->exit_reason = KVM_EXIT_TPR_ACCESS;
			r = 0;
			goto out;
		}
+	}

	kvm_inject_pending_timer_irqs(vcpu);
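
The hunk above is the consumer side of the change: the context that notices a vcpu has moved only records the fact with a request bit (see the inline kvm_migrate_apic_timer() added further down), and the hrtimer_cancel()/hrtimer_start() work is done in the vcpu run loop before guest entry. As a rough illustration of that producer/consumer pattern only, the user-space sketch below mimics it with C11 atomics and pthreads standing in for the kernel's set_bit()/test_and_clear_bit(); the struct vcpu, REQ_MIGRATE_TIMER, request_timer_migration() and vcpu_loop() names are hypothetical and not part of KVM.

/*
 * Stand-alone sketch of the vcpu->requests pattern used above (assumed names,
 * user-space C11 atomics instead of the kernel's set_bit/test_and_clear_bit).
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define REQ_MIGRATE_TIMER (1UL << 1)	/* plays the role of KVM_REQ_MIGRATE_TIMER */

struct vcpu {
	atomic_ulong requests;		/* stands in for vcpu->requests */
};

/* Producer side: only record that migration is needed, do no timer work. */
static void request_timer_migration(struct vcpu *v)
{
	atomic_fetch_or(&v->requests, REQ_MIGRATE_TIMER);
}

/* Consumer helper: atomically test and clear one request bit. */
static int test_and_clear_request(struct vcpu *v, unsigned long req)
{
	return (atomic_fetch_and(&v->requests, ~req) & req) != 0;
}

/* Consumer side: the "vcpu loop" services pending requests before each entry. */
static void *vcpu_loop(void *arg)
{
	struct vcpu *v = arg;

	for (int i = 0; i < 5; i++) {
		if (atomic_load(&v->requests)) {
			if (test_and_clear_request(v, REQ_MIGRATE_TIMER))
				printf("migrating timer before guest entry\n");
		}
		usleep(1000);		/* "run the guest" */
	}
	return NULL;
}

int main(void)
{
	struct vcpu v = { .requests = 0 };
	pthread_t t;

	pthread_create(&t, NULL, vcpu_loop, &v);
	request_timer_migration(&v);	/* e.g. from a CPU-migration notifier */
	pthread_join(t, NULL);
	return 0;
}

The split mirrors the patch: the context that detects the need for migration only flips a bit, and all timer manipulation stays in the vcpu loop.
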
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH 0
+#define KVM_REQ_MIGRATE_TIMER 1
#define KVM_REQ_REPORT_TPR_ACCESS 2
struct kvm_vcpu;
	return (gpa_t)gfn << PAGE_SHIFT;
}

+static inline void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
+{
+	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
+}
+
enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,