x86/speculation: Use ARCH_CAPABILITIES to skip L1D flush on vmentry
author Paolo Bonzini <pbonzini@redhat.com>
Sun, 5 Aug 2018 14:07:46 +0000 (16:07 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 15 Aug 2018 16:13:00 +0000 (18:13 +0200)
commit 8e0b2b916662e09dd4d09e5271cdf214c6b80e62 upstream

Bit 3 of ARCH_CAPABILITIES tells a hypervisor that L1D flush on vmentry is
not needed.  Add a new value to enum vmx_l1d_flush_state, which is used
either if there is no L1TF bug at all, or if bit 3 is set in ARCH_CAPABILITIES.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/vmx.h
arch/x86/kernel/cpu/bugs.c
arch/x86/kvm/vmx.c

index 2cccec4ab1c9b5220cddc570c1a00c6efed20f1c..ef7eec669a1bcc5d1ff80a50920b4e74ba137e3b 100644 (file)
@@ -70,6 +70,7 @@
 #define MSR_IA32_ARCH_CAPABILITIES     0x0000010a
 #define ARCH_CAP_RDCL_NO               (1 << 0)   /* Not susceptible to Meltdown */
 #define ARCH_CAP_IBRS_ALL              (1 << 1)   /* Enhanced IBRS support */
+#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH (1 << 3)   /* Skip L1D flush on vmentry */
 #define ARCH_CAP_SSB_NO                        (1 << 4)   /*
                                                    * Not susceptible to Speculative Store Bypass
                                                    * attack, so no Speculative Store Bypass
index 7b049281716978abc073f44d5f4b24411bfff74e..08c14aec26acaecb7ed96f04cd87da8a0d123116 100644 (file)
@@ -577,6 +577,7 @@ enum vmx_l1d_flush_state {
        VMENTER_L1D_FLUSH_COND,
        VMENTER_L1D_FLUSH_ALWAYS,
        VMENTER_L1D_FLUSH_EPT_DISABLED,
+       VMENTER_L1D_FLUSH_NOT_REQUIRED,
 };
 
 extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;
index 6e6cd933d56c509d3b22b407b11ebe237f4803f2..64a72b4a780a85de4ce99583e69bd219d5e417bc 100644 (file)
@@ -731,6 +731,7 @@ static const char *l1tf_vmx_states[] = {
        [VMENTER_L1D_FLUSH_COND]                = "conditional cache flushes",
        [VMENTER_L1D_FLUSH_ALWAYS]              = "cache flushes",
        [VMENTER_L1D_FLUSH_EPT_DISABLED]        = "EPT disabled",
+       [VMENTER_L1D_FLUSH_NOT_REQUIRED]        = "flush not necessary"
 };
 
 static ssize_t l1tf_show_state(char *buf)
index bbaa5421c2370e1d1d54b222cec84cff43ca112c..63ec03108b5de0bfab3072d360e54ae12c0f3ce9 100644 (file)
@@ -221,6 +221,16 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
                return 0;
        }
 
+       if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
+              u64 msr;
+
+              rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
+              if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
+                      l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
+                      return 0;
+              }
+       }
+
        /* If set to auto use the default l1tf mitigation method */
        if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
                switch (l1tf_mitigation) {