unsigned long irq_sources_bitmap;
s64 kvmclock_offset;
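+ /* serializes guest TSC writes for this VM (taken in kvm_write_tsc()) */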
+ spinlock_t tsc_write_lock;
struct kvm_xen_hvm_config xen_hvm_config;
bool (*has_wbinvd_exit)(void);
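+ /* program the per-vCPU hardware TSC offset (e.g. the VMCS TSC_OFFSET field on VMX) */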
+ void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
+
const struct trace_print_flags *exit_reasons_str;
};
svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
svm->asid_generation = 0;
init_vmcb(svm);
- svm_write_tsc_offset(&svm->vcpu, 0-native_read_tsc());
+ kvm_write_tsc(&svm->vcpu, 0);
err = fx_init(&svm->vcpu);
if (err)
switch (ecx) {
case MSR_IA32_TSC:
- svm_write_tsc_offset(vcpu, data - native_read_tsc());
+ kvm_write_tsc(vcpu, data);
break;
case MSR_STAR:
svm->vmcb->save.star = data;
.set_supported_cpuid = svm_set_supported_cpuid,
.has_wbinvd_exit = svm_has_wbinvd_exit,
+
+ .write_tsc_offset = svm_write_tsc_offset,
};
static int __init svm_init(void)
}
/*
- * writes 'guest_tsc' into guest's timestamp counter "register"
- * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
+ * Writes 'offset' into the guest's timestamp counter offset register
*/
-static void vmx_write_tsc_offset(u64 offset)
+static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
vmcs_write64(TSC_OFFSET, offset);
}
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct shared_msr_entry *msr;
- u64 host_tsc;
int ret = 0;
switch (msr_index) {
vmcs_writel(GUEST_SYSENTER_ESP, data);
break;
case MSR_IA32_TSC:
- rdtscll(host_tsc);
- vmx_write_tsc_offset(data - host_tsc);
+ kvm_write_tsc(vcpu, data);
break;
case MSR_IA32_CR_PAT:
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
- vmx_write_tsc_offset(0-native_read_tsc());
+ kvm_write_tsc(&vmx->vcpu, 0);
return 0;
}
.set_supported_cpuid = vmx_set_supported_cpuid,
.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
+
+ .write_tsc_offset = vmx_write_tsc_offset,
};
static int __init vmx_init(void)
static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
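+/*
+ * kvm_write_tsc - make the guest's TSC read as 'data' from now on.
+ *
+ * Computes the offset against the current host TSC and programs it
+ * through the vendor write_tsc_offset hook, serialized by tsc_write_lock.
+ */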
+void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
+{
+ struct kvm *kvm = vcpu->kvm;
+ u64 offset;
+ unsigned long flags;
+
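+ /* serialize concurrent TSC writes within this VM */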
+ spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
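+ /* guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc */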
+ offset = data - native_read_tsc();
+ kvm_x86_ops->write_tsc_offset(vcpu, offset);
+ spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
+
+ /* Resetting the TSC must disable the overshoot protection below */
+ vcpu->arch.hv_clock.tsc_timestamp = 0;
+}
+EXPORT_SYMBOL_GPL(kvm_write_tsc);
+
static void kvm_write_guest_time(struct kvm_vcpu *v)
{
struct timespec ts;
/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
+ spin_lock_init(&kvm->arch.tsc_write_lock);
+
return kvm;
}
void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
+void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data);
+
#endif