KVM: VMX: Prevent guest RSB poisoning attacks with eIBRS
author Josh Poimboeuf <jpoimboe@kernel.org>
Thu, 27 Oct 2022 20:55:30 +0000 (13:55 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 1 Nov 2022 18:14:45 +0000 (19:14 +0100)
commit fc02735b14fff8c6678b521d324ade27b1a3d4cf upstream.

On eIBRS systems, the returns in the vmexit return path from
__vmx_vcpu_run() to vmx_vcpu_run() are exposed to RSB poisoning attacks.

Fix that by moving the post-vmexit spec_ctrl handling to immediately
after the vmexit.
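
For illustration only (not part of the patch): a minimal sketch of the
reordered tail of vmx_vcpu_run(), heavily simplified. The function and
field names match the diff below; all surrounding code is assumed.

    static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
    {
            struct vcpu_vmx *vmx = to_vmx(vcpu);
            u64 spec_ctrl;

            /* ... inline asm does VMLAUNCH/VMRESUME; the vmexit lands here ... */

            /*
             * The guest may have planted RSB entries and may have cleared
             * SPEC_CTRL.IBRS, so handle both before the first unbalanced RET,
             * i.e. before calling any other function:
             */
            vmexit_fill_RSB();                            /* retpoline: flush guest RSB entries */
            spec_ctrl = vmx_spec_ctrl_restore_host(vmx);  /* eIBRS: restore host SPEC_CTRL */

            /* Only now is it safe to call functions that return. */
            if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
                    vmx->spec_ctrl = spec_ctrl;
    }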

Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
[ bp: Adjust for the fact that vmexit is in inline assembly ]
Signed-off-by: Suraj Jitindar Singh <surajjs@amazon.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/include/asm/nospec-branch.h
arch/x86/kernel/cpu/bugs.c
arch/x86/kvm/vmx.c

diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index c7cbad1ec03474c7079bc1d2a4b52a2f8cb0ba0e..2d6d5bac49976721e0a0e6316b180fb9d1c12357 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -257,7 +257,7 @@ extern char __indirect_thunk_end[];
  * retpoline and IBRS mitigations for Spectre v2 need this; only on future
  * CPUs with IBRS_ALL *might* it be avoided.
  */
-static inline void vmexit_fill_RSB(void)
+static __always_inline void vmexit_fill_RSB(void)
 {
 #ifdef CONFIG_RETPOLINE
        unsigned long loops;
@@ -292,6 +292,7 @@ static inline void indirect_branch_prediction_barrier(void)
 
 /* The Intel SPEC CTRL MSR base value cache */
 extern u64 x86_spec_ctrl_base;
+extern u64 x86_spec_ctrl_current;
 extern void write_spec_ctrl_current(u64 val, bool force);
 extern u64 spec_ctrl_current(void);
 
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 5f805013b7f47b7f3b475c545acde80ea667202a..1fde42e5be6e15d4ed6b2a8bbd73831012e73450 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -185,6 +185,10 @@ void __init check_bugs(void)
 #endif
 }
 
+/*
+ * NOTE: For VMX, this function is not called in the vmexit path.
+ * It uses vmx_spec_ctrl_restore_host() instead.
+ */
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 {
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 48b40e160e271a3290de1f121cdd5f3e3906590c..539720a8e09419c07e922f4bdeb031f8c2873b09 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9770,10 +9770,31 @@ static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
        vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, delta_tsc);
 }
 
+u64 __always_inline vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx)
+{
+       u64 guestval, hostval = this_cpu_read(x86_spec_ctrl_current);
+
+       if (!cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL))
+               return 0;
+
+       guestval = __rdmsr(MSR_IA32_SPEC_CTRL);
+
+       /*
+        * If the guest/host SPEC_CTRL values differ, restore the host value.
+        */
+       if (guestval != hostval)
+               native_wrmsrl(MSR_IA32_SPEC_CTRL, hostval);
+
+       barrier_nospec();
+
+       return guestval;
+}
+
 static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long debugctlmsr, cr3, cr4;
+       u64 spec_ctrl;
 
        /* Record the guest's net vcpu time for enforced NMI injections. */
        if (unlikely(!cpu_has_virtual_nmis() &&
@@ -9967,6 +9988,23 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
                , "eax", "ebx", "edi", "esi"
 #endif
              );
+       /*
+        * IMPORTANT: RSB filling and SPEC_CTRL handling must be done before
+        * the first unbalanced RET after vmexit!
+        *
+        * For retpoline, RSB filling is needed to prevent poisoned RSB entries
+        * and (in some cases) RSB underflow.
+        *
+        * eIBRS has its own protection against poisoned RSB, so it doesn't
+        * need the RSB filling sequence.  But it does need to be enabled
+        * before the first unbalanced RET.
+        *
+        * So no RETs before vmx_spec_ctrl_restore_host() below.
+        */
+       vmexit_fill_RSB();
+
+       /* Save this for below */
+       spec_ctrl = vmx_spec_ctrl_restore_host(vmx);
 
        vmx_enable_fb_clear(vmx);
 
@@ -9986,12 +10024,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
         * save it.
         */
        if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
-               vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
-
-       x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
-
-       /* Eliminate branch target predictions from guest mode */
-       vmexit_fill_RSB();
+               vmx->spec_ctrl = spec_ctrl;
 
        /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
        if (debugctlmsr)