kvm: x86: add tsc_offset field to struct kvm_vcpu_arch
authorLuiz Capitulino <lcapitulino@redhat.com>
Wed, 7 Sep 2016 18:47:19 +0000 (14:47 -0400)
committerPaolo Bonzini <pbonzini@redhat.com>
Fri, 16 Sep 2016 14:57:45 +0000 (16:57 +0200)
A future commit will want to easily read a vCPU's TSC offset,
so we store it in struct kvm_vcpu_arch for easy access.

Signed-off-by: Luiz Capitulino <lcapitulino@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/x86.c

index cd82bf74e7a551ad3954498b03af7e1cec8021b1..ddffcfbe155cd1cc6d38f2c69ed34dfed1c06a88 100644 (file)
@@ -568,6 +568,7 @@ struct kvm_vcpu_arch {
                struct kvm_steal_time steal;
        } st;
 
+       u64 tsc_offset;
        u64 last_guest_tsc;
        u64 last_host_tsc;
        u64 tsc_offset_adjustment;
index 57ffe78931042c15f35d2e3a977cacd1f43333c1..9a2cc696702a18d63e3c53cfedd4365b6f4f7c51 100644 (file)
@@ -1413,6 +1413,12 @@ u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
 }
 EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
 
+/*
+ * Program the guest TSC offset through the vendor backend hook and also
+ * cache the value in vcpu->arch.tsc_offset, so later readers can obtain
+ * the current offset without a kvm_x86_ops callback.
+ */
+static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+{
+       kvm_x86_ops->write_tsc_offset(vcpu, offset);
+       vcpu->arch.tsc_offset = offset;
+}
+
 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
        struct kvm *kvm = vcpu->kvm;
@@ -1522,7 +1528,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 
        if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
                update_ia32_tsc_adjust_msr(vcpu, offset);
-       kvm_x86_ops->write_tsc_offset(vcpu, offset);
+       kvm_vcpu_write_tsc_offset(vcpu, offset);
        raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
        spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
@@ -2750,7 +2756,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                if (check_tsc_unstable()) {
                        u64 offset = kvm_compute_tsc_offset(vcpu,
                                                vcpu->arch.last_guest_tsc);
-                       kvm_x86_ops->write_tsc_offset(vcpu, offset);
+                       kvm_vcpu_write_tsc_offset(vcpu, offset);
                        vcpu->arch.tsc_catchup = 1;
                }
                /*