x86/mm/kmmio: Make the tracer robust against L1TF
Author:     Andi Kleen <ak@linux.intel.com>
AuthorDate: Tue, 7 Aug 2018 22:09:38 +0000 (15:09 -0700)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Wed, 15 Aug 2018 16:13:01 +0000 (18:13 +0200)
commit 1063711b57393c1999248cccb57bebfaf16739e7 upstream

The mmio tracer sets io mapping PTEs and PMDs to non-present when enabled
without inverting the address bits, which leaves those entries vulnerable
to L1TF.

Make it use the right low-level macros to actually invert the address bits
and protect against L1TF.
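
As an illustration of what the inversion buys, below is a minimal user-space
sketch of the idea. It is not the kernel's implementation: the PTE_PFN_MASK
value and the mknotpresent() helper are simplified stand-ins for the real
pgtable macros. Once _PAGE_PRESENT is cleared, the physical-frame bits are
complemented, so a speculative L1TF load through the stale entry points well
outside usable memory instead of at data cached in L1D.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define _PAGE_PRESENT	0x001ULL
#define PTE_PFN_MASK	0x000ffffffffff000ULL	/* simplified stand-in */

typedef uint64_t pteval_t;

/* Clear the present bit and complement the PFN bits (the inversion idea). */
static pteval_t mknotpresent(pteval_t pte)
{
	pteval_t pfn_bits = pte & PTE_PFN_MASK;

	pte &= ~(_PAGE_PRESENT | PTE_PFN_MASK);
	return pte | (~pfn_bits & PTE_PFN_MASK);
}

int main(void)
{
	pteval_t pte = (0x1234ULL << PAGE_SHIFT) | _PAGE_PRESENT;
	pteval_t saved = pte;		/* save the full value, as the patch does */
	pteval_t cleared = mknotpresent(pte);

	printf("present:  %#018llx\n", (unsigned long long)pte);
	printf("cleared:  %#018llx\n", (unsigned long long)cleared);
	printf("restored: %#018llx\n", (unsigned long long)saved);
	return 0;
}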

In principle this could be avoided because MMIO tracing is not likely to be
enabled on production machines, but the fix is straightforward and for
consistency's sake it's better to get rid of the open-coded PTE manipulation.

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 7c868670963617865d2e9fb494a08b04a82b49cb..79eb55ce69a91f716c15524d56816604bcdcc100 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -126,24 +126,29 @@ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
 
 static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
 {
+       pmd_t new_pmd;
        pmdval_t v = pmd_val(*pmd);
        if (clear) {
-               *old = v & _PAGE_PRESENT;
-               v &= ~_PAGE_PRESENT;
-       } else  /* presume this has been called with clear==true previously */
-               v |= *old;
-       set_pmd(pmd, __pmd(v));
+               *old = v;
+               new_pmd = pmd_mknotpresent(*pmd);
+       } else {
+               /* Presume this has been called with clear==true previously */
+               new_pmd = __pmd(*old);
+       }
+       set_pmd(pmd, new_pmd);
 }
 
 static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
 {
        pteval_t v = pte_val(*pte);
        if (clear) {
-               *old = v & _PAGE_PRESENT;
-               v &= ~_PAGE_PRESENT;
-       } else  /* presume this has been called with clear==true previously */
-               v |= *old;
-       set_pte_atomic(pte, __pte(v));
+               *old = v;
+               /* Nothing should care about address */
+               pte_clear(&init_mm, 0, pte);
+       } else {
+               /* Presume this has been called with clear==true previously */
+               set_pte_atomic(pte, __pte(*old));
+       }
 }
 
 static int clear_page_presence(struct kmmio_fault_page *f, bool clear)