KVM: X86: Let kvm-clock report the right tsc frequency
author    Joerg Roedel <joerg.roedel@amd.com>
          Fri, 25 Mar 2011 08:44:47 +0000 (09:44 +0100)
committer Avi Kivity <avi@redhat.com>
          Wed, 11 May 2011 11:57:04 +0000 (07:57 -0400)
This patch changes the kvm_guest_time_update() function to use the
TSC frequency the guest actually has when updating its clock.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/x86.c
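
For reference, here is a minimal userspace sketch (not the kernel's
exact code) of the fixed-point conversion that kvm_get_time_scale()
prepares and compute_guest_tsc() applies through pvclock_scale_delta():
elapsed nanoseconds are scaled by a 32.32 fixed-point factor derived
from the vCPU's TSC frequency. The 2.5 GHz guest frequency and the
scale_delta() helper below are illustrative assumptions.

#include <stdint.h>
#include <stdio.h>

/* (delta << shift) * (mult / 2^32) -- the same fixed-point multiply
 * pvclock_scale_delta() performs, simplified to 128-bit math here. */
static uint64_t scale_delta(uint64_t delta, uint32_t mult, int8_t shift)
{
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;
	return (uint64_t)(((unsigned __int128)delta * mult) >> 32);
}

int main(void)
{
	/* A guest with virtual_tsc_khz = 2,500,000 (2.5 GHz) advances
	 * 2.5 TSC cycles per nanosecond; that ratio is encoded as
	 * mult = 0.625 * 2^32 with shift = 2 (2^2 * 0.625 = 2.5). */
	uint32_t mult = 0xa0000000;
	int8_t shift = 2;

	/* 1000 ns of kernel time -> 2500 guest TSC cycles. */
	printf("%llu cycles\n",
	       (unsigned long long)scale_delta(1000, mult, shift));
	return 0;
}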

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ecdc562ea3e215561b70b8304a599e7113b4a363..e3aaa02ca032d30d51a1dee43ca7bcf60bf46a7b 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -396,7 +396,10 @@ struct kvm_vcpu_arch {
        u64 last_kernel_ns;
        u64 last_tsc_nsec;
        u64 last_tsc_write;
+       u32 virtual_tsc_khz;
        bool tsc_catchup;
+       u32  tsc_catchup_mult;
+       s8   tsc_catchup_shift;
 
        bool nmi_pending;
        bool nmi_injected;
@@ -466,9 +469,6 @@ struct kvm_arch {
        u64 last_tsc_nsec;
        u64 last_tsc_offset;
        u64 last_tsc_write;
-       u32 virtual_tsc_khz;
-       u32 virtual_tsc_mult;
-       s8 virtual_tsc_shift;
 
        struct kvm_xen_hvm_config xen_hvm_config;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0d6524fa2affb114b22be4e98b4c36405ed33f0f..78d729174d999010c509639bf28823bfd1755d95 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -969,6 +969,14 @@ static inline int kvm_tsc_changes_freq(void)
        return ret;
 }
 
+static u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu)
+{
+       if (vcpu->arch.virtual_tsc_khz)
+               return vcpu->arch.virtual_tsc_khz;
+       else
+               return __this_cpu_read(cpu_tsc_khz);
+}
+
 static inline u64 nsec_to_cycles(u64 nsec)
 {
        u64 ret;
@@ -982,20 +990,19 @@ static inline u64 nsec_to_cycles(u64 nsec)
        return ret;
 }
 
-static void kvm_arch_set_tsc_khz(struct kvm *kvm, u32 this_tsc_khz)
+static void kvm_init_tsc_catchup(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
 {
        /* Compute a scale to convert nanoseconds in TSC cycles */
        kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
-                          &kvm->arch.virtual_tsc_shift,
-                          &kvm->arch.virtual_tsc_mult);
-       kvm->arch.virtual_tsc_khz = this_tsc_khz;
+                          &vcpu->arch.tsc_catchup_shift,
+                          &vcpu->arch.tsc_catchup_mult);
 }
 
 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
 {
        u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.last_tsc_nsec,
-                                     vcpu->kvm->arch.virtual_tsc_mult,
-                                     vcpu->kvm->arch.virtual_tsc_shift);
+                                     vcpu->arch.tsc_catchup_mult,
+                                     vcpu->arch.tsc_catchup_shift);
        tsc += vcpu->arch.last_tsc_write;
        return tsc;
 }
@@ -1062,8 +1069,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
        local_irq_save(flags);
        kvm_get_msr(v, MSR_IA32_TSC, &tsc_timestamp);
        kernel_ns = get_kernel_ns();
-       this_tsc_khz = __this_cpu_read(cpu_tsc_khz);
-
+       this_tsc_khz = vcpu_tsc_khz(v);
        if (unlikely(this_tsc_khz == 0)) {
                local_irq_restore(flags);
                kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
@@ -6060,8 +6066,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        }
        vcpu->arch.pio_data = page_address(page);
 
-       if (!kvm->arch.virtual_tsc_khz)
-               kvm_arch_set_tsc_khz(kvm, max_tsc_khz);
+       kvm_init_tsc_catchup(vcpu, max_tsc_khz);
 
        r = kvm_mmu_create(vcpu);
        if (r < 0)
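
As a usage note: the guest-visible frequency that vcpu_tsc_khz()
consults is set from userspace with the KVM_SET_TSC_KHZ vcpu ioctl,
which was merged alongside this change (its availability is signaled
by KVM_CAP_TSC_CONTROL). A minimal sketch, assuming a kernel that
provides these ioctls, with error handling and full VM setup elided:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm  = open("/dev/kvm", O_RDWR);
	int vm   = ioctl(kvm, KVM_CREATE_VM, 0);
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);

	/* Present a 2.5 GHz TSC to the guest; with this patch,
	 * kvm-clock then scales by this value rather than by the
	 * host's current cpu_tsc_khz. */
	if (ioctl(vcpu, KVM_SET_TSC_KHZ, 2500000) < 0)
		perror("KVM_SET_TSC_KHZ");

	printf("guest tsc: %d kHz\n", ioctl(vcpu, KVM_GET_TSC_KHZ, 0));
	return 0;
}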