KVM: x86: Validate guest writes to MSR_IA32_APICBASE
author     Jan Kiszka <jan.kiszka@siemens.com>
           Fri, 24 Jan 2014 15:48:44 +0000 (16:48 +0100)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Mon, 27 Jan 2014 13:39:44 +0000 (14:39 +0100)
Check for invalid state transitions on guest-initiated updates of
MSR_IA32_APICBASE. This addresses both enabling of the x2APIC when it
is not supported and all invalid transitions as described in SDM
section 10.12.5. It also checks that no reserved bits are set in
APICBASE by the guest.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
[Use cpuid_maxphyaddr instead of guest_cpuid_get_phys_bits. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/cpuid.h
arch/x86/kvm/lapic.h
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
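
For reference, the mode-transition rule being enforced (SDM 10.12.5; EN is
bit 11, EXTD is bit 10 of the MSR) can be sketched as a stand-alone
user-space program. The function name and the checks in main() are
illustrative only and are not part of the kernel change:

#include <stdint.h>
#include <stdio.h>

/* APIC base MSR mode bits (Intel SDM Vol. 3, section 10.12.5) */
#define APICBASE_ENABLE  (1ULL << 11)   /* xAPIC global enable (EN)  */
#define X2APIC_ENABLE    (1ULL << 10)   /* x2APIC mode enable (EXTD) */

/* Return 1 if the mode change old -> new is architecturally legal. */
static int apic_mode_transition_valid(uint64_t old_base, uint64_t new_base)
{
	uint64_t old_state = old_base & (APICBASE_ENABLE | X2APIC_ENABLE);
	uint64_t new_state = new_base & (APICBASE_ENABLE | X2APIC_ENABLE);

	if (new_state == X2APIC_ENABLE)		/* EN=0, EXTD=1: always invalid */
		return 0;
	if (new_state == APICBASE_ENABLE &&
	    old_state == (APICBASE_ENABLE | X2APIC_ENABLE))
		return 0;			/* x2APIC -> xAPIC: invalid */
	if (new_state == (APICBASE_ENABLE | X2APIC_ENABLE) &&
	    old_state == 0)
		return 0;			/* disabled -> x2APIC: invalid */
	return 1;
}

int main(void)
{
	uint64_t base = 0xfee00000ULL;

	/* xAPIC -> x2APIC is fine ... */
	printf("%d\n", apic_mode_transition_valid(base | APICBASE_ENABLE,
						  base | APICBASE_ENABLE | X2APIC_ENABLE));
	/* ... but x2APIC -> xAPIC must first pass through the disabled state. */
	printf("%d\n", apic_mode_transition_valid(base | APICBASE_ENABLE | X2APIC_ENABLE,
						  base | APICBASE_ENABLE));
	return 0;
}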

diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index f1e4895174b2472da123b8b74cbee2460b239ccf..a2a1bb7ed8c1b32677db62d4f5bf4303cd1cb9b2 100644
@@ -72,4 +72,12 @@ static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
        return best && (best->ecx & bit(X86_FEATURE_PCID));
 }
 
+static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *best;
+
+       best = kvm_find_cpuid_entry(vcpu, 1, 0);
+       return best && (best->ecx & bit(X86_FEATURE_X2APIC));
+}
+
 #endif
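
guest_cpuid_has_x2apic() tests CPUID.01H:ECX[21] in the guest's cached CPUID
entries. As a rough stand-alone analogue (probing the host CPU rather than a
guest, using GCC/clang's <cpuid.h>; illustrative only), one could write:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID leaf 1, ECX bit 21 advertises x2APIC support. */
	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;
	printf("x2APIC supported: %s\n", (ecx & (1u << 21)) ? "yes" : "no");
	return 0;
}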
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index c8b0d0d2da5ce2d67f9342fba000e60e1aec84eb..6a11845fd8b94435a383823a1e559ba153dbaaca 100644
@@ -65,7 +65,7 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
                struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map);
 
 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
-void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data);
+int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
 void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
                struct kvm_lapic_state *s);
 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 5c8879127cfa8dbd0327768b9068680d75657285..a06f101ef64b4ae43e954319bbaa81a5c24c326b 100644
@@ -4392,7 +4392,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       u64 msr;
+       struct msr_data apic_base_msr;
 
        vmx->rmode.vm86_active = 0;
 
@@ -4400,10 +4400,11 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 
        vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
        kvm_set_cr8(&vmx->vcpu, 0);
-       msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+       apic_base_msr.data = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
        if (kvm_vcpu_is_bsp(&vmx->vcpu))
-               msr |= MSR_IA32_APICBASE_BSP;
-       kvm_set_apic_base(&vmx->vcpu, msr);
+               apic_base_msr.data |= MSR_IA32_APICBASE_BSP;
+       apic_base_msr.host_initiated = true;
+       kvm_set_apic_base(&vmx->vcpu, &apic_base_msr);
 
        vmx_segment_cache_clear(vmx);
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index dc11b4f98577149c2a811a7dfaa946c4e98f33c3..34d0d610aa8a07e8265bb2101a3e90b14446cc88 100644
@@ -257,10 +257,26 @@ u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_get_apic_base);
 
-void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
-{
-       /* TODO: reserve bits check */
-       kvm_lapic_set_base(vcpu, data);
+int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+{
+       u64 old_state = vcpu->arch.apic_base &
+               (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
+       u64 new_state = msr_info->data &
+               (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
+       u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) |
+               0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE);
+
+       if (!msr_info->host_initiated &&
+           ((msr_info->data & reserved_bits) != 0 ||
+            new_state == X2APIC_ENABLE ||
+            (new_state == MSR_IA32_APICBASE_ENABLE &&
+             old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) ||
+            (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) &&
+             old_state == 0)))
+               return 1;
+
+       kvm_lapic_set_base(vcpu, msr_info->data);
+       return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 
@@ -2009,8 +2025,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case 0x200 ... 0x2ff:
                return set_msr_mtrr(vcpu, msr, data);
        case MSR_IA32_APICBASE:
-               kvm_set_apic_base(vcpu, data);
-               break;
+               return kvm_set_apic_base(vcpu, msr_info);
        case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
                return kvm_x2apic_msr_write(vcpu, msr, data);
        case MSR_IA32_TSCDEADLINE:
@@ -6412,6 +6427,7 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
 {
+       struct msr_data apic_base_msr;
        int mmu_reset_needed = 0;
        int pending_vec, max_bits, idx;
        struct desc_ptr dt;
@@ -6435,7 +6451,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
        mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
        kvm_x86_ops->set_efer(vcpu, sregs->efer);
-       kvm_set_apic_base(vcpu, sregs->apic_base);
+       apic_base_msr.data = sregs->apic_base;
+       apic_base_msr.host_initiated = true;
+       kvm_set_apic_base(vcpu, &apic_base_msr);
 
        mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
        kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
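
As a worked example of the reserved_bits mask computed in kvm_set_apic_base()
above: assuming a guest MAXPHYADDR of 36 and no x2APIC in guest CPUID (both
values chosen only for illustration), the mask and a couple of sample writes
behave like this outside the kernel:

#include <stdint.h>
#include <stdio.h>

#define APICBASE_ENABLE  (1ULL << 11)
#define X2APIC_ENABLE    (1ULL << 10)

int main(void)
{
	int maxphyaddr = 36;            /* assumed guest MAXPHYADDR          */
	int has_x2apic = 0;             /* assumed: no x2APIC in guest CPUID */
	uint64_t reserved = ((~0ULL) << maxphyaddr) |   /* bits >= MAXPHYADDR */
			    0x2ffULL |                  /* bits 0-7 and 9     */
			    (has_x2apic ? 0 : X2APIC_ENABLE);

	/* The reset value 0xfee00000 | EN has no reserved bit set (prints 0) ... */
	printf("%d\n", ((0xfee00000ULL | APICBASE_ENABLE) & reserved) != 0);
	/* ... while a base above the 36-bit limit would be rejected (prints 1). */
	printf("%d\n", ((1ULL << 40) & reserved) != 0);
	return 0;
}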