x86/KVM/VMX: Split the VMX MSR LOAD structures to have separate host/guest numbers
author	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Wed, 20 Jun 2018 17:58:37 +0000 (13:58 -0400)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 15 Aug 2018 16:12:54 +0000 (18:12 +0200)
commit 33966dd6b2d2c352fae55412db2ea8cfff5df13a upstream

There is no semantic change, but this change allows an unbalanced number
of MSRs to be loaded on VMEXIT and VMENTER, i.e. the number of MSRs to
save or restore on VMEXIT and on VMENTER may now differ.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
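
To see why separate counters matter, consider a minimal user-space model of
the new bookkeeping. This is a sketch, not kernel code: vmx_msr_entry and
NR_AUTOLOAD_MSRS are simplified stand-ins for the kernel definitions, and
add_paired_msr()/add_guest_only_msr() are hypothetical helpers that only
illustrate the unbalanced updates the split makes possible (bounds checks
are omitted for brevity).

/* Minimal user-space model of the split autoload bookkeeping. */
#include <stdio.h>

#define NR_AUTOLOAD_MSRS 8		/* stand-in for the kernel constant */

struct vmx_msr_entry {			/* simplified stand-in */
	unsigned int index;
	unsigned long long value;
};

struct vmx_msrs {
	unsigned int nr;
	struct vmx_msr_entry val[NR_AUTOLOAD_MSRS];
};

struct msr_autoload {
	struct vmx_msrs guest;		/* loaded on VMENTER */
	struct vmx_msrs host;		/* loaded on VMEXIT */
};

/* Hypothetical helper: add an MSR switched in both directions. */
static void add_paired_msr(struct msr_autoload *m, unsigned int msr,
			   unsigned long long gval, unsigned long long hval)
{
	m->guest.val[m->guest.nr++] = (struct vmx_msr_entry){ msr, gval };
	m->host.val[m->host.nr++]   = (struct vmx_msr_entry){ msr, hval };
}

/* Hypothetical helper: add a VMENTER-only MSR; host.nr is untouched. */
static void add_guest_only_msr(struct msr_autoload *m, unsigned int msr,
			       unsigned long long gval)
{
	m->guest.val[m->guest.nr++] = (struct vmx_msr_entry){ msr, gval };
}

int main(void)
{
	struct msr_autoload m = { 0 };

	add_paired_msr(&m, 0x1d9, 0, 0);	/* e.g. MSR_IA32_DEBUGCTLMSR */
	add_guest_only_msr(&m, 0x10b, 1);	/* e.g. MSR_IA32_FLUSH_CMD */

	/* With the old shared 'nr' these two counts could never differ. */
	printf("VM_ENTRY_MSR_LOAD_COUNT = %u\n", m.guest.nr);	/* prints 2 */
	printf("VM_EXIT_MSR_LOAD_COUNT  = %u\n", m.host.nr);	/* prints 1 */
	return 0;
}

The kernel performs these updates through add_atomic_switch_msr() and
clear_atomic_switch_msr(), shown in the hunks below; this patch itself keeps
guest.nr and host.nr in lockstep and only makes the two counters independent
so that follow-up changes can populate them asymmetrically.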
arch/x86/kvm/vmx.c

index e7eedccc15f21a5b8c2d0d68a0f40ce913158148..36779522acd5411bfd66295d888aabe06d974b52 100644
@@ -618,6 +618,11 @@ static inline int pi_test_sn(struct pi_desc *pi_desc)
                        (unsigned long *)&pi_desc->control);
 }
 
+struct vmx_msrs {
+       unsigned int            nr;
+       struct vmx_msr_entry    val[NR_AUTOLOAD_MSRS];
+};
+
 struct vcpu_vmx {
        struct kvm_vcpu       vcpu;
        unsigned long         host_rsp;
@@ -651,9 +656,8 @@ struct vcpu_vmx {
        struct loaded_vmcs   *loaded_vmcs;
        bool                  __launched; /* temporary, used in vmx_vcpu_run */
        struct msr_autoload {
-               unsigned nr;
-               struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
-               struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
+               struct vmx_msrs guest;
+               struct vmx_msrs host;
        } msr_autoload;
        struct {
                int           loaded;
@@ -2041,18 +2045,18 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
                }
                break;
        }
-
-       for (i = 0; i < m->nr; ++i)
-               if (m->guest[i].index == msr)
+       for (i = 0; i < m->guest.nr; ++i)
+               if (m->guest.val[i].index == msr)
                        break;
 
-       if (i == m->nr)
+       if (i == m->guest.nr)
                return;
-       --m->nr;
-       m->guest[i] = m->guest[m->nr];
-       m->host[i] = m->host[m->nr];
-       vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
-       vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+       --m->guest.nr;
+       --m->host.nr;
+       m->guest.val[i] = m->guest.val[m->guest.nr];
+       m->host.val[i] = m->host.val[m->host.nr];
+       vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+       vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
 }
 
 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
@@ -2104,24 +2108,25 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
                wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
        }
 
-       for (i = 0; i < m->nr; ++i)
-               if (m->guest[i].index == msr)
+       for (i = 0; i < m->guest.nr; ++i)
+               if (m->guest.val[i].index == msr)
                        break;
 
        if (i == NR_AUTOLOAD_MSRS) {
                printk_once(KERN_WARNING "Not enough msr switch entries. "
                                "Can't add msr %x\n", msr);
                return;
-       } else if (i == m->nr) {
-               ++m->nr;
-               vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
-               vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+       } else if (i == m->guest.nr) {
+               ++m->guest.nr;
+               ++m->host.nr;
+               vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+               vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
        }
 
-       m->guest[i].index = msr;
-       m->guest[i].value = guest_val;
-       m->host[i].index = msr;
-       m->host[i].value = host_val;
+       m->guest.val[i].index = msr;
+       m->guest.val[i].value = guest_val;
+       m->host.val[i].index = msr;
+       m->host.val[i].value = host_val;
 }
 
 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
@@ -5765,9 +5770,9 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 
        vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
        vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
-       vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
+       vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
        vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
-       vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
+       vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
 
        if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
                vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
@@ -10901,10 +10906,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
         * Set the MSR load/store lists to match L0's settings.
         */
        vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
-       vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
-       vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
-       vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
-       vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
+       vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+       vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
+       vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
+       vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
 
        /*
         * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
@@ -11842,8 +11847,8 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
        vmx_segment_cache_clear(vmx);
 
        /* Update any VMCS fields that might have changed while L2 ran */
-       vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
-       vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
+       vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+       vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
        vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
        if (vmx->hv_deadline_tsc == -1)
                vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,