kvm/x86: added hyper-v crash msrs into kvm hyperv context
author     Andrey Smetanin <asmetanin@virtuozzo.com>
           Fri, 3 Jul 2015 12:01:37 +0000 (15:01 +0300)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Thu, 23 Jul 2015 06:27:06 +0000 (08:27 +0200)
Add crash variables to the per-VM kvm Hyper-V context (struct kvm_hv) to serve
as storage for the Hyper-V crash MSRs (HV_X64_MSR_CRASH_P0..P4 and
HV_X64_MSR_CRASH_CTL). A guest-side sketch of the crash MSR protocol follows
the file list below.

Signed-off-by: Andrey Smetanin <asmetanin@virtuozzo.com>
Signed-off-by: Denis V. Lunev <den@openvz.org>
Reviewed-by: Peter Hornyack <peterhornyack@google.com>
CC: Paolo Bonzini <pbonzini@redhat.com>
CC: Gleb Natapov <gleb@kernel.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/hyperv.c
arch/x86/kvm/hyperv.h
arch/x86/kvm/x86.c
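
For orientation before the diff: the patch only adds host-side storage and the
partition-wide MSR handlers, but the protocol they back looks roughly like the
guest-side sketch below. It is illustrative only; the MSR numbers and the
NOTIFY bit are taken from the Hyper-V TLFS / the HV_X64_MSR_CRASH_* defines the
diff relies on (assumed here to be 0x40000100..0x40000105 and bit 63), and
wrmsr64() is a made-up helper, not part of this patch.

#include <stdint.h>

#define HV_X64_MSR_CRASH_P0         0x40000100  /* P1..P4 follow contiguously */
#define HV_X64_MSR_CRASH_CTL        0x40000105
#define HV_X64_MSR_CRASH_CTL_NOTIFY (1ULL << 63)

static inline void wrmsr64(uint32_t msr, uint64_t val)
{
        __asm__ volatile("wrmsr" : : "c"(msr),
                         "a"((uint32_t)val), "d"((uint32_t)(val >> 32)));
}

/* Report an NT bugcheck: publish the five parameters, then ring the bell.
 * The final write is what makes kvm_hv_msr_set_crash_ctl() in the diff
 * below raise KVM_REQ_HV_CRASH. */
static void hv_report_crash(const uint64_t param[5])
{
        int i;

        for (i = 0; i < 5; i++)
                wrmsr64(HV_X64_MSR_CRASH_P0 + i, param[i]);
        wrmsr64(HV_X64_MSR_CRASH_CTL, HV_X64_MSR_CRASH_CTL_NOTIFY);
}
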

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 24168822212bd1141e68f85fcfd6c113b67fff89..fa32b5314dcdad94d377b59f1b5233f2a55a1f32 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -595,6 +595,10 @@ struct kvm_hv {
        u64 hv_guest_os_id;
        u64 hv_hypercall;
        u64 hv_tsc_page;
+
+       /* Hyper-v based guest crash (NT kernel bugcheck) parameters */
+       u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
+       u64 hv_crash_ctl;
 };
 
 struct kvm_arch {
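
Note that the new fields sit in struct kvm_hv, i.e. they are per-VM rather than
per-vcpu; this matches the hyperv.c change below, where the crash MSRs are
flagged as partition-wide and their writes are serialized under kvm->lock.
HV_X64_MSR_CRASH_PARAMS is expected to evaluate to 5; presumably it is defined
in the uapi Hyper-V header along these lines (an assumption, not part of this
diff):

#define HV_X64_MSR_CRASH_PARAMS \
        (1 + (HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0))  /* == 5 */
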
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 2b49f100a698ac6a1077a465568f623908fc1c58..a8160d2ae362c5dc1018785b6b1e18c598e2a6cc 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -39,6 +39,8 @@ static bool kvm_hv_msr_partition_wide(u32 msr)
        case HV_X64_MSR_HYPERCALL:
        case HV_X64_MSR_REFERENCE_TSC:
        case HV_X64_MSR_TIME_REF_COUNT:
+       case HV_X64_MSR_CRASH_CTL:
+       case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
                r = true;
                break;
        }
@@ -46,7 +48,63 @@ static bool kvm_hv_msr_partition_wide(u32 msr)
        return r;
 }
 
-static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
+                                    u32 index, u64 *pdata)
+{
+       struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+
+       if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
+               return -EINVAL;
+
+       *pdata = hv->hv_crash_param[index];
+       return 0;
+}
+
+static int kvm_hv_msr_get_crash_ctl(struct kvm_vcpu *vcpu, u64 *pdata)
+{
+       struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+
+       *pdata = hv->hv_crash_ctl;
+       return 0;
+}
+
+static int kvm_hv_msr_set_crash_ctl(struct kvm_vcpu *vcpu, u64 data, bool host)
+{
+       struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+
+       if (host)
+               hv->hv_crash_ctl = data & HV_X64_MSR_CRASH_CTL_NOTIFY;
+
+       if (!host && (data & HV_X64_MSR_CRASH_CTL_NOTIFY)) {
+
+               vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
+                         hv->hv_crash_param[0],
+                         hv->hv_crash_param[1],
+                         hv->hv_crash_param[2],
+                         hv->hv_crash_param[3],
+                         hv->hv_crash_param[4]);
+
+               /* Send notification about crash to user space */
+               kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
+       }
+
+       return 0;
+}
+
+static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
+                                    u32 index, u64 data)
+{
+       struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+
+       if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
+               return -EINVAL;
+
+       hv->hv_crash_param[index] = data;
+       return 0;
+}
+
+static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
+                            bool host)
 {
        struct kvm *kvm = vcpu->kvm;
        struct kvm_hv *hv = &kvm->arch.hyperv;
@@ -99,6 +157,12 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                mark_page_dirty(kvm, gfn);
                break;
        }
+       case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+               return kvm_hv_msr_set_crash_data(vcpu,
+                                                msr - HV_X64_MSR_CRASH_P0,
+                                                data);
+       case HV_X64_MSR_CRASH_CTL:
+               return kvm_hv_msr_set_crash_ctl(vcpu, data, host);
        default:
                vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
                            msr, data);
@@ -171,6 +235,12 @@ static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        case HV_X64_MSR_REFERENCE_TSC:
                data = hv->hv_tsc_page;
                break;
+       case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+               return kvm_hv_msr_get_crash_data(vcpu,
+                                                msr - HV_X64_MSR_CRASH_P0,
+                                                pdata);
+       case HV_X64_MSR_CRASH_CTL:
+               return kvm_hv_msr_get_crash_ctl(vcpu, pdata);
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
                return 1;
@@ -215,13 +285,13 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        return 0;
 }
 
-int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 {
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;
 
                mutex_lock(&vcpu->kvm->lock);
-               r = kvm_hv_set_msr_pw(vcpu, msr, data);
+               r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
                mutex_unlock(&vcpu->kvm->lock);
                return r;
        } else
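
Note the host/guest split in kvm_hv_msr_set_crash_ctl() above: a host-initiated
write (userspace, e.g. restoring MSR state after migration) only latches the
NOTIFY bit, while a guest write with the bit set raises KVM_REQ_HV_CRASH.
Because the crash MSRs are also added to emulated_msrs in x86.c below, userspace
can read the latched parameters back with the ordinary MSR ioctls. A minimal
sketch, assuming an already-created vcpu fd; read_hv_crash_params() is a
hypothetical helper name:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

#define HV_X64_MSR_CRASH_P0 0x40000100  /* as in the guest-side sketch above */

/* Fetch the five latched crash parameters from a vcpu. */
static int read_hv_crash_params(int vcpu_fd, __u64 params[5])
{
        struct {
                struct kvm_msrs hdr;
                struct kvm_msr_entry entries[5];
        } msrs;
        int i;

        memset(&msrs, 0, sizeof(msrs));
        msrs.hdr.nmsrs = 5;
        for (i = 0; i < 5; i++)
                msrs.entries[i].index = HV_X64_MSR_CRASH_P0 + i;

        /* KVM_GET_MSRS returns the number of MSRs successfully processed. */
        if (ioctl(vcpu_fd, KVM_GET_MSRS, &msrs) != 5)
                return -1;

        for (i = 0; i < 5; i++)
                params[i] = msrs.entries[i].data;
        return 0;
}
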
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index 115c738ccbe3ff28efbb2505fee9efee00b4231d..c7bce559f67b3e90044ea2c96b00280f9c978ebe 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -24,7 +24,7 @@
 #ifndef __ARCH_X86_KVM_HYPERV_H__
 #define __ARCH_X86_KVM_HYPERV_H__
 
-int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);
+int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
 int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
 bool kvm_hv_hypercall_enabled(struct kvm *kvm);
 int kvm_hv_hypercall(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d53b10744fba72239cb903bfb8f1b55724d8faab..cfa3e5a7d6bef5ca3e37428c54d091f134fce376 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -950,6 +950,8 @@ static u32 emulated_msrs[] = {
        MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
        HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
        HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
+       HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
+       HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
        HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
        MSR_KVM_PV_EOI_EN,
 
@@ -2103,7 +2105,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 */
                break;
        case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
-               return kvm_hv_set_msr_common(vcpu, msr, data);
+       case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+       case HV_X64_MSR_CRASH_CTL:
+               return kvm_hv_set_msr_common(vcpu, msr, data,
+                                            msr_info->host_initiated);
        case MSR_IA32_BBL_CR_CTL3:
                /* Drop writes to this legacy MSR -- see rdmsr
                 * counterpart for further detail.
@@ -2302,6 +2307,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                msr_info->data = 0x20000000;
                break;
        case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
+       case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
+       case HV_X64_MSR_CRASH_CTL:
                return kvm_hv_get_msr_common(vcpu,
                                             msr_info->index, &msr_info->data);
                break;
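
At this point in the series the guest write only raises KVM_REQ_HV_CRASH;
turning that request into a KVM_EXIT_SYSTEM_EVENT of type
KVM_SYSTEM_EVENT_CRASH is left to a companion patch. Assuming that plumbing is
in place, a VMM's run loop could react roughly as follows (a sketch:
handle_hv_crash_exit() is hypothetical and reuses the read_hv_crash_params()
helper sketched earlier):

#include <linux/kvm.h>
#include <stdio.h>

/* Hypothetical hook, called after ioctl(vcpu_fd, KVM_RUN, 0) returns; 'run' is
 * the vcpu's mmap'ed struct kvm_run. Returns 1 if the exit was a Hyper-V
 * guest crash, 0 otherwise. */
static int handle_hv_crash_exit(int vcpu_fd, struct kvm_run *run)
{
        __u64 p[5];

        if (run->exit_reason != KVM_EXIT_SYSTEM_EVENT ||
            run->system_event.type != KVM_SYSTEM_EVENT_CRASH)
                return 0;

        if (read_hv_crash_params(vcpu_fd, p) == 0)
                fprintf(stderr,
                        "guest bugcheck: 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n",
                        (unsigned long long)p[0], (unsigned long long)p[1],
                        (unsigned long long)p[2], (unsigned long long)p[3],
                        (unsigned long long)p[4]);
        return 1;
}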