x86/nmi: Fix NMI uaccess race against CR3 switching
author Andy Lutomirski <luto@kernel.org>
Wed, 29 Aug 2018 15:47:18 +0000 (08:47 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 5 Sep 2018 07:26:39 +0000 (09:26 +0200)
commit 4012e77a903d114f915fc607d6d2ed54a3d6c9b1 upstream.

An NMI can hit in the middle of context switching or in the middle of
switch_mm_irqs_off().  In either case, CR3 might not match current->mm,
which could cause copy_from_user_nmi() and friends to read the wrong
memory.

Fix it by adding a new nmi_uaccess_okay() helper and checking it in
copy_from_user_nmi() and in __copy_from_user_nmi()'s callers.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Rik van Riel <riel@surriel.com>
Cc: Nadav Amit <nadav.amit@gmail.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Jann Horn <jannh@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/dd956eba16646fd0b15c3c0741269dfd84452dac.1535557289.git.luto@kernel.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/events/core.c
arch/x86/include/asm/tlbflush.h
arch/x86/lib/usercopy.c
arch/x86/mm/tlb.c

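To make the fix concrete before the diff: the pattern it establishes is to gate every NMI-time user access on nmi_uaccess_okay(). A minimal sketch of a hypothetical caller follows; nmi_read_u64() is illustrative and not part of the patch. After this change copy_from_user_nmi() performs the check internally, while callers of the unchecked __copy_from_user_nmi(), such as perf_callchain_user() in the first hunk, must do it themselves.

#include <linux/types.h>
#include <linux/uaccess.h>
#include <asm/tlbflush.h>               /* nmi_uaccess_okay() */

/*
 * Illustrative NMI-context reader, not part of the patch: refuse to
 * touch user memory unless CR3 is known to be consistent with
 * current->mm.  copy_from_user_nmi() returns the number of bytes it
 * could NOT copy, so 0 means the whole value was read.
 */
static bool nmi_read_u64(u64 *val, const void __user *uptr)
{
        /*
         * Caller-side check, as done for __copy_from_user_nmi() in
         * perf_callchain_user(); after this patch copy_from_user_nmi()
         * performs the same check internally as well.
         */
        if (!nmi_uaccess_okay())
                return false;

        return copy_from_user_nmi(val, uptr, sizeof(*val)) == 0;
}
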
arch/x86/events/core.c
index 717c9219d00ec3fbdc6ebea65277bfc6025dc8fa..e5097dc85a06cf73d4da6b93aa3f5d34b9bdad6a 100644
@@ -2462,7 +2462,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
 
        perf_callchain_store(entry, regs->ip);
 
-       if (!current->mm)
+       if (!nmi_uaccess_okay())
                return;
 
        if (perf_callchain_user32(regs, entry))
arch/x86/include/asm/tlbflush.h
index 875ca99b82eef003781464af111214392fdd0f5c..5f00ecb9d2515e693218d0e4a561824fe0279d94 100644
@@ -175,8 +175,16 @@ struct tlb_state {
         * are on.  This means that it may not match current->active_mm,
         * which will contain the previous user mm when we're in lazy TLB
         * mode even if we've already switched back to swapper_pg_dir.
+        *
+        * During switch_mm_irqs_off(), loaded_mm will be set to
+        * LOADED_MM_SWITCHING during the brief interrupts-off window
+        * when CR3 and loaded_mm would otherwise be inconsistent.  This
+        * is for nmi_uaccess_okay()'s benefit.
         */
        struct mm_struct *loaded_mm;
+
+#define LOADED_MM_SWITCHING ((struct mm_struct *)1)
+
        u16 loaded_mm_asid;
        u16 next_asid;
        /* last user mm's ctx id */
@@ -246,6 +254,38 @@ struct tlb_state {
 };
 DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 
+/*
+ * Blindly accessing user memory from NMI context can be dangerous
+ * if we're in the middle of switching the current user task or
+ * switching the loaded mm.  It can also be dangerous if we
+ * interrupted some kernel code that was temporarily using a
+ * different mm.
+ */
+static inline bool nmi_uaccess_okay(void)
+{
+       struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
+       struct mm_struct *current_mm = current->mm;
+
+       VM_WARN_ON_ONCE(!loaded_mm);
+
+       /*
+        * The condition we want to check is
+        * current_mm->pgd == __va(read_cr3_pa()).  This may be slow, though,
+        * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
+        * is supposed to be reasonably fast.
+        *
+        * Instead, we check the almost equivalent but somewhat conservative
+        * condition below, and we rely on the fact that switch_mm_irqs_off()
+        * sets loaded_mm to LOADED_MM_SWITCHING before writing to CR3.
+        */
+       if (loaded_mm != current_mm)
+               return false;
+
+       VM_WARN_ON_ONCE(current_mm->pgd != __va(read_cr3_pa()));
+
+       return true;
+}
+
 /* Initialize cr4 shadow for this CPU. */
 static inline void cr4_init_shadow(void)
 {
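
For reference, the single comparison in nmi_uaccess_okay() folds several cases into one test. A small illustrative helper, assuming only the definitions above (not part of the patch), spells them out:

#include <linux/sched.h>        /* current */
#include <asm/tlbflush.h>       /* cpu_tlbstate, LOADED_MM_SWITCHING */

/*
 * Illustrative only: name the reason an NMI-time user access would be
 * refused on this CPU.  The real nmi_uaccess_okay() folds all of this
 * into one comparison, since LOADED_MM_SWITCHING can never equal a
 * real current->mm (and loaded_mm is never NULL, while current->mm is
 * NULL for kernel threads).
 */
static const char *nmi_uaccess_state(void)
{
        struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);

        if (loaded_mm == LOADED_MM_SWITCHING)
                return "CR3 switch in progress";        /* sentinel window */
        if (loaded_mm != current->mm)
                return "lazy TLB / kernel thread";      /* CR3 may belong to another mm */
        return "CR3 matches current->mm";
}
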
arch/x86/lib/usercopy.c
index c8c6ad0d58b89c3621d0fcf11f45e00442ebf2a2..3f435d7fca5e62bc999e48c6a4f3e1c0fc86def9 100644
@@ -7,6 +7,8 @@
 #include <linux/uaccess.h>
 #include <linux/export.h>
 
+#include <asm/tlbflush.h>
+
 /*
  * We rely on the nested NMI work to allow atomic faults from the NMI path; the
  * nested NMI paths are careful to preserve CR2.
@@ -19,6 +21,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
        if (__range_not_ok(from, n, TASK_SIZE))
                return n;
 
+       if (!nmi_uaccess_okay())
+               return n;
+
        /*
         * Even though this function is typically called from NMI/IRQ context
         * disable pagefaults so that its behaviour is consistent even when
arch/x86/mm/tlb.c
index 0c936435ea9398727da162fd76c5ee4dc4a91892..83a3f4c935fc5ff00d6d408d8e709b92a86722fb 100644
@@ -292,6 +292,10 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 
                choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
 
+               /* Let nmi_uaccess_okay() know that we're changing CR3. */
+               this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
+               barrier();
+
                if (need_flush) {
                        this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
                        this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
@@ -322,6 +326,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                if (next != &init_mm)
                        this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
 
+               /* Make sure we write CR3 before loaded_mm. */
+               barrier();
+
                this_cpu_write(cpu_tlbstate.loaded_mm, next);
                this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);
        }
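
Putting the two tlb.c hunks together, the interrupts-off window now follows this ordering. This is a condensed sketch, not the full function; write_cr3(build_cr3(...)) stands in for the need_flush/ASID branches elided here.

        /* switch_mm_irqs_off(), condensed to the relevant ordering: */
        this_cpu_write(cpu_tlbstate.loaded_mm, LOADED_MM_SWITCHING);
        barrier();              /* sentinel is visible to an NMI before CR3 moves */

        write_cr3(build_cr3(next->pgd, new_asid));      /* flush/no-flush details elided */

        barrier();              /* CR3 is written before loaded_mm is updated */
        this_cpu_write(cpu_tlbstate.loaded_mm, next);
        this_cpu_write(cpu_tlbstate.loaded_mm_asid, new_asid);

        /*
         * An NMI landing anywhere in this window sees either
         * LOADED_MM_SWITCHING (so nmi_uaccess_okay() fails closed) or a
         * (loaded_mm, CR3) pair that is already consistent.  barrier()
         * is only a compiler barrier, which is sufficient because the
         * NMI runs on the same CPU.
         */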