KVM: x86: Replace callback compute_tsc_offset() with a common function
author Haozhong Zhang <haozhong.zhang@intel.com>
Tue, 20 Oct 2015 07:39:05 +0000 (15:39 +0800)
committer Paolo Bonzini <pbonzini@redhat.com>
Tue, 10 Nov 2015 11:06:16 +0000 (12:06 +0100)
Both VMX and SVM calculate the TSC offset in the same way, so this
patch removes the callback compute_tsc_offset() and replaces it with
a common function, kvm_compute_tsc_offset().

Signed-off-by: Haozhong Zhang <haozhong.zhang@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
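
One subtlety behind the "same way" claim: the removed vmx_compute_tsc_offset()
subtracted the raw host TSC, while the new common helper first passes it
through kvm_scale_tsc(). The two are equivalent for VMX because
kvm_scale_tsc(), introduced earlier in this same TSC-scaling series, is the
identity whenever the vCPU's scaling ratio is the default, and VMX never
programs a non-default ratio at this point. A minimal sketch of that behavior,
reconstructed from the surrounding series (the field and helper names below
are assumptions taken from those patches, not part of this diff):

u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
{
	u64 _tsc = tsc;
	u64 ratio = vcpu->arch.tsc_scaling_ratio;

	/*
	 * With the default (1:1) ratio the raw TSC passes through
	 * unchanged, so on VMX kvm_compute_tsc_offset() still reduces
	 * to target_tsc - rdtsc(), exactly what the removed
	 * vmx_compute_tsc_offset() returned.
	 */
	if (ratio != kvm_default_tsc_scaling_ratio)
		_tsc = __scale_tsc(ratio, tsc);

	return _tsc;
}

With that identity in place, folding both vendor callbacks into a single
x86.c helper loses nothing for VMX and keeps the scaling path in one place
for SVM.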
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c5a3f3d66e905cf38cdaa744d6c4f04acceeb368..672f960e81449478215865357cf28b1f072f6ffb 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -856,7 +856,6 @@ struct kvm_x86_ops {
        u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
        void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
-       u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
        u64 (*read_l1_tsc)(struct kvm_vcpu *vcpu, u64 host_tsc);
 
        void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f6e49a6c9ab0f76da2ac71453e35113d3885da32..d99b175ffbeaf62bcdd7720d5464d34a8e4348da 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1004,15 +1004,6 @@ static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool ho
        mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
-static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
-{
-       u64 tsc;
-
-       tsc = kvm_scale_tsc(vcpu, rdtsc());
-
-       return target_tsc - tsc;
-}
-
 static void init_vmcb(struct vcpu_svm *svm)
 {
        struct vmcb_control_area *control = &svm->vmcb->control;
@@ -4370,7 +4361,6 @@ static struct kvm_x86_ops svm_x86_ops = {
        .read_tsc_offset = svm_read_tsc_offset,
        .write_tsc_offset = svm_write_tsc_offset,
        .adjust_tsc_offset = svm_adjust_tsc_offset,
-       .compute_tsc_offset = svm_compute_tsc_offset,
        .read_l1_tsc = svm_read_l1_tsc,
 
        .set_tdp_cr3 = set_tdp_cr3,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index baee468938997656c0194f19c41019ccfc67771b..2d4782ce9a9378b00e526d7cacd3b3ce43ce6602 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2426,11 +2426,6 @@ static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool ho
                                           offset + adjustment);
 }
 
-static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
-{
-       return target_tsc - rdtsc();
-}
-
 static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
@@ -10813,7 +10808,6 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .read_tsc_offset = vmx_read_tsc_offset,
        .write_tsc_offset = vmx_write_tsc_offset,
        .adjust_tsc_offset = vmx_adjust_tsc_offset,
-       .compute_tsc_offset = vmx_compute_tsc_offset,
        .read_l1_tsc = vmx_read_l1_tsc,
 
        .set_tdp_cr3 = vmx_set_cr3,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c314e8d22a671f80e8f2323137478eb507101d00..bb46066e125b95ad65e226a673631624b8de0b4f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1392,6 +1392,15 @@ u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
 }
 EXPORT_SYMBOL_GPL(kvm_scale_tsc);
 
+static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
+{
+       u64 tsc;
+
+       tsc = kvm_scale_tsc(vcpu, rdtsc());
+
+       return target_tsc - tsc;
+}
+
 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
        struct kvm *kvm = vcpu->kvm;
@@ -1403,7 +1412,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
        u64 data = msr->data;
 
        raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
-       offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
+       offset = kvm_compute_tsc_offset(vcpu, data);
        ns = get_kernel_ns();
        elapsed = ns - kvm->arch.last_tsc_nsec;
 
@@ -1460,7 +1469,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
                } else {
                        u64 delta = nsec_to_cycles(vcpu, elapsed);
                        data += delta;
-                       offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
+                       offset = kvm_compute_tsc_offset(vcpu, data);
                        pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
                }
                matched = true;
@@ -2687,7 +2696,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                if (tsc_delta < 0)
                        mark_tsc_unstable("KVM discovered backwards TSC");
                if (check_tsc_unstable()) {
-                       u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu,
+                       u64 offset = kvm_compute_tsc_offset(vcpu,
                                                vcpu->arch.last_guest_tsc);
                        kvm_x86_ops->write_tsc_offset(vcpu, offset);
                        vcpu->arch.tsc_catchup = 1;