x86/KVM/VMX: Initialize the vmx_l1d_flush_pages' content
author Nicolai Stange <nstange@suse.de>
Wed, 18 Jul 2018 17:07:38 +0000 (19:07 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 15 Aug 2018 16:12:57 +0000 (18:12 +0200)
commit 288d152c23dcf3c09da46c5c481903ca10ebfef7 upstream

The slow path in vmx_l1d_flush() reads from vmx_l1d_flush_pages in order
to evict the L1d cache.
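
The asm sequence in vmx_l1d_flush() (visible in the second hunk below) is,
roughly, the following C sketch; the name l1d_flush_sketch is made up for
illustration only, and the real code uses hand-written asm so the compiler
cannot reorder or elide the loads:

        /* Illustrative C rendition of the slow path, not the kernel code. */
        static void l1d_flush_sketch(const unsigned char *pages, unsigned int size)
        {
                volatile unsigned char sink;
                unsigned int offset;

                /* First ensure the pages are in the TLB: one read per 4K page. */
                for (offset = 0; offset < size; offset += 4096)
                        sink = pages[offset];

                /* Now fill the cache: one read per 64-byte cache line. */
                for (offset = 0; offset < size; offset += 64)
                        sink = pages[offset];
        }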

However, these pages are never cleared and, in theory, their data could be
leaked.

More importantly, KSM could merge a nested hypervisor's vmx_l1d_flush_pages
to fewer than 1 << L1D_CACHE_ORDER host physical pages, and this would break
the L1d flushing algorithm: L1D on x86_64 is tagged by physical addresses.
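
KSM only merges pages whose contents are byte-for-byte identical, which is
why identically filled flush pages are at risk. A hypothetical user-space
sketch of that property (the demo, buffer size and patterns are invented for
illustration; it merely opts a region in to KSM scanning via
madvise(MADV_MERGEABLE)):

        #define _DEFAULT_SOURCE
        #include <string.h>
        #include <sys/mman.h>

        int main(void)
        {
                const size_t page = 4096, npages = 4;
                unsigned char *buf = mmap(NULL, npages * page,
                                          PROT_READ | PROT_WRITE,
                                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
                size_t i;

                if (buf == MAP_FAILED)
                        return 1;

                /* Identical contents: KSM may fold these onto one physical page. */
                memset(buf, 0, npages * page);

                /* Distinct per-page patterns: no two pages can be merged. */
                for (i = 0; i < npages; i++)
                        memset(buf + i * page, (int)(i + 1), page);

                /* Ask KSM to consider the region (needs CONFIG_KSM and ksmd). */
                madvise(buf, npages * page, MADV_MERGEABLE);
                return 0;
        }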

Fix this by initializing each of the vmx_l1d_flush_pages with a different
pattern.

Rename the "empty_zp" asm constraint identifier in vmx_l1d_flush() to
"flush_pages" to reflect this change.

Fixes: a47dd5f06714 ("x86/KVM/VMX: Add L1D flush algorithm")
Signed-off-by: Nicolai Stange <nstange@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/kvm/vmx.c

index d077869f9228f81763d434ff641c14e76b4a758a..56c4108412987b6498517561508ab0eae1702841 100644
@@ -214,6 +214,7 @@ static void *vmx_l1d_flush_pages;
 static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
 {
        struct page *page;
+       unsigned int i;
 
        if (!enable_ept) {
                l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
@@ -246,6 +247,16 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
                if (!page)
                        return -ENOMEM;
                vmx_l1d_flush_pages = page_address(page);
+
+               /*
+                * Initialize each page with a different pattern in
+                * order to protect against KSM in the nested
+                * virtualization case.
+                */
+               for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
+                       memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
+                              PAGE_SIZE);
+               }
        }
 
        l1tf_vmx_mitigation = l1tf;
@@ -9176,7 +9187,7 @@ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
                /* First ensure the pages are in the TLB */
                "xorl   %%eax, %%eax\n"
                ".Lpopulate_tlb:\n\t"
-               "movzbl (%[empty_zp], %%" _ASM_AX "), %%ecx\n\t"
+               "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
                "addl   $4096, %%eax\n\t"
                "cmpl   %%eax, %[size]\n\t"
                "jne    .Lpopulate_tlb\n\t"
@@ -9185,12 +9196,12 @@ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
                /* Now fill the cache */
                "xorl   %%eax, %%eax\n"
                ".Lfill_cache:\n"
-               "movzbl (%[empty_zp], %%" _ASM_AX "), %%ecx\n\t"
+               "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
                "addl   $64, %%eax\n\t"
                "cmpl   %%eax, %[size]\n\t"
                "jne    .Lfill_cache\n\t"
                "lfence\n"
-               :: [empty_zp] "r" (vmx_l1d_flush_pages),
+               :: [flush_pages] "r" (vmx_l1d_flush_pages),
                    [size] "r" (size)
                : "eax", "ebx", "ecx", "edx");
 }