kvm_mmu_set_mmio_spte_mask((0x3ull << 62) | 0x6ull);
}
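+/*
+ * Check whether the L1D flush on VMENTER can be delegated to the MSR
+ * save list. This requires EPT, a CPU affected by L1TF, bare metal
+ * (no hypervisor underneath), the IA32_FLUSH_CMD MSR and the 'always'
+ * flush mode.
+ */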
+static bool vmx_l1d_use_msr_save_list(void)
+{
+ if (!enable_ept || !boot_cpu_has_bug(X86_BUG_L1TF) ||
+ static_cpu_has(X86_FEATURE_HYPERVISOR) ||
+ !static_cpu_has(X86_FEATURE_FLUSH_L1D))
+ return false;
+
+ return vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
+}
+
#define VMX_XSS_EXIT_BITMAP 0
/*
* Sets up the vmcs for emulated real mode.
vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
GUEST_INTR_STATE_NMI);
}
+ /*
+ * If flushing the L1D cache on every VMENTER is enforced and the
+ * MSR is available, use the MSR save list.
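+ * With the MSR on the VM-entry MSR-load list the CPU writes
+ * L1D_FLUSH into IA32_FLUSH_CMD on every VMENTER without any
+ * further software interaction.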
+ */
+ if (vmx_l1d_use_msr_save_list())
+ add_atomic_switch_msr(vmx, MSR_IA32_FLUSH_CMD, L1D_FLUSH, 0, true);
}
static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
bool always;
/*
- * If the mitigation mode is 'flush always', keep the flush bit
- * set, otherwise clear it. It gets set again either from
- * vcpu_run() or from one of the unsafe VMEXIT handlers.
+ * This code is only executed when:
+ * - the flush mode is 'cond'
+ * - the flush mode is 'always' and the flush MSR is not
+ * available
+ *
+ * If the CPU has the flush MSR then clear the flush bit because
+ * 'always' mode is handled via the MSR save list.
+ *
+ * If the MSR is not available then act depending on the
+ * mitigation mode: if 'flush always', keep the flush bit set,
+ * otherwise clear it.
+ *
+ * The flush bit gets set again either from vcpu_run() or from one
+ * of the unsafe VMEXIT handlers.
*/
- always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
+ if (static_cpu_has(X86_FEATURE_FLUSH_L1D))
+ always = false;
+ else
+ always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
+
vcpu->arch.l1tf_flush_l1d = always;
vcpu->stat.l1d_flush++;
struct page *page;
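+ /*
+ * The software flush pages are only needed when the flush is done
+ * manually; skip the allocation when the mitigation is disabled,
+ * the CPU is not affected by L1TF or the flush is handled via the
+ * MSR save list on VMENTER.
+ */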
if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER ||
- !boot_cpu_has_bug(X86_BUG_L1TF))
+ !boot_cpu_has_bug(X86_BUG_L1TF) ||
+ vmx_l1d_use_msr_save_list())
return 0;
if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {