x86 mmiotrace: fix save/restore page table state
author Pekka Paalanen <pq@iki.fi>
Sun, 1 Mar 2009 14:11:58 +0000 (16:11 +0200)
committer Ingo Molnar <mingo@elte.hu>
Mon, 2 Mar 2009 09:20:36 +0000 (10:20 +0100)
From baa99e2b32449ec7bf147c234adfa444caecac8a Mon Sep 17 00:00:00 2001
From: Pekka Paalanen <pq@iki.fi>
Date: Sun, 22 Feb 2009 20:02:43 +0200

Blindly setting _PAGE_PRESENT in disarm_kmmio_fault_page() overlooks the
possibility that the page was not present when it was armed.

Make arm_kmmio_fault_page() store the previous page presence in struct
kmmio_fault_page and use it on disarm.
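
For illustration, a minimal user-space sketch of the save-and-restore
pattern follows. The struct, the PAGE_PRESENT bit and the helpers are
simplified stand-ins for the real page-table accessors, not the kernel
code itself; only the bookkeeping logic is shown. The point is that
disarm restores whatever presence state arm saved, instead of
unconditionally setting the present bit.

  /* Minimal sketch of the arm/disarm presence tracking. */
  #include <stdbool.h>
  #include <stdio.h>

  #define PAGE_PRESENT 0x1UL

  struct fault_page {
          unsigned long pte;      /* simplified page-table entry */
          bool old_presence;      /* presence prior to arming */
          bool armed;
  };

  static void set_presence(struct fault_page *f, bool present, bool *old)
  {
          *old = !!(f->pte & PAGE_PRESENT);       /* save previous state */
          f->pte &= ~PAGE_PRESENT;
          if (present)
                  f->pte |= PAGE_PRESENT;
  }

  static void arm(struct fault_page *f)
  {
          set_presence(f, false, &f->old_presence);
          f->armed = true;
  }

  static void disarm(struct fault_page *f)
  {
          bool tmp;

          /* restore the saved state instead of forcing the page present */
          set_presence(f, f->old_presence, &tmp);
          f->armed = false;
  }

  int main(void)
  {
          struct fault_page f = { .pte = 0 };     /* page not present */

          arm(&f);
          disarm(&f);
          /* the page stays not present, as it was before arming */
          printf("present after disarm: %d\n", !!(f.pte & PAGE_PRESENT));
          return 0;
  }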

This patch was originally written by Stuart Bennett, but Pekka Paalanen
rewrote it a little differently.

Signed-off-by: Pekka Paalanen <pq@iki.fi>
Cc: Stuart Bennett <stuart@freedesktop.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/mm/kmmio.c

index fb1f11546fcd6a5522389cb240e7e6271cc2c40c..be361eb828c83c597b1b1283d685aaec9b5c098e 100644
@@ -32,6 +32,8 @@ struct kmmio_fault_page {
        struct list_head list;
        struct kmmio_fault_page *release_next;
        unsigned long page; /* location of the fault page */
+       bool old_presence; /* page presence prior to arming */
+       bool armed;
 
        /*
         * Number of times this page has been registered as a part
@@ -105,8 +107,7 @@ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
        return NULL;
 }
 
-static int set_page_present(unsigned long addr, bool present,
-                                                       unsigned int *pglevel)
+static int set_page_presence(unsigned long addr, bool present, bool *old)
 {
        pteval_t pteval;
        pmdval_t pmdval;
@@ -119,20 +120,21 @@ static int set_page_present(unsigned long addr, bool present,
                return -1;
        }
 
-       if (pglevel)
-               *pglevel = level;
-
        switch (level) {
        case PG_LEVEL_2M:
                pmd = (pmd_t *)pte;
-               pmdval = pmd_val(*pmd) & ~_PAGE_PRESENT;
+               pmdval = pmd_val(*pmd);
+               *old = !!(pmdval & _PAGE_PRESENT);
+               pmdval &= ~_PAGE_PRESENT;
                if (present)
                        pmdval |= _PAGE_PRESENT;
                set_pmd(pmd, __pmd(pmdval));
                break;
 
        case PG_LEVEL_4K:
-               pteval = pte_val(*pte) & ~_PAGE_PRESENT;
+               pteval = pte_val(*pte);
+               *old = !!(pteval & _PAGE_PRESENT);
+               pteval &= ~_PAGE_PRESENT;
                if (present)
                        pteval |= _PAGE_PRESENT;
                set_pte_atomic(pte, __pte(pteval));
@@ -148,19 +150,39 @@ static int set_page_present(unsigned long addr, bool present,
        return 0;
 }
 
-/** Mark the given page as not present. Access to it will trigger a fault. */
-static int arm_kmmio_fault_page(unsigned long page, unsigned int *pglevel)
+/*
+ * Mark the given page as not present. Access to it will trigger a fault.
+ *
+ * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the
+ * protection is ignored here. RCU read lock is assumed held, so the struct
+ * will not disappear unexpectedly. Furthermore, the caller must guarantee
+ * that double arming the same virtual address (page) cannot occur.
+ *
+ * Double disarming on the other hand is allowed, and may occur when a fault
+ * and mmiotrace shutdown happen simultaneously.
+ */
+static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
 {
-       int ret = set_page_present(page & PAGE_MASK, false, pglevel);
-       WARN_ONCE(ret < 0, KERN_ERR "kmmio arming 0x%08lx failed.\n", page);
+       int ret;
+       WARN_ONCE(f->armed, KERN_ERR "kmmio page already armed.\n");
+       if (f->armed) {
+               pr_warning("kmmio double-arm: page 0x%08lx, ref %d, old %d\n",
+                                       f->page, f->count, f->old_presence);
+       }
+       ret = set_page_presence(f->page, false, &f->old_presence);
+       WARN_ONCE(ret < 0, KERN_ERR "kmmio arming 0x%08lx failed.\n", f->page);
+       f->armed = true;
        return ret;
 }
 
-/** Mark the given page as present. */
-static void disarm_kmmio_fault_page(unsigned long page, unsigned int *pglevel)
+/** Restore the given page to saved presence state. */
+static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
 {
-       int ret = set_page_present(page & PAGE_MASK, true, pglevel);
-       WARN_ONCE(ret < 0, KERN_ERR "kmmio disarming 0x%08lx failed.\n", page);
+       bool tmp;
+       int ret = set_page_presence(f->page, f->old_presence, &tmp);
+       WARN_ONCE(ret < 0,
+                       KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
+       f->armed = false;
 }
 
 /*
@@ -207,7 +229,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 
        ctx = &get_cpu_var(kmmio_ctx);
        if (ctx->active) {
-               disarm_kmmio_fault_page(faultpage->page, NULL);
+               disarm_kmmio_fault_page(faultpage);
                if (addr == ctx->addr) {
                        /*
                         * On SMP we sometimes get recursive probe hits on the
@@ -249,7 +271,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
        regs->flags &= ~X86_EFLAGS_IF;
 
        /* Now we set present bit in PTE and single step. */
-       disarm_kmmio_fault_page(ctx->fpage->page, NULL);
+       disarm_kmmio_fault_page(ctx->fpage);
 
        /*
         * If another cpu accesses the same page while we are stepping,
@@ -288,7 +310,7 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
        if (ctx->probe && ctx->probe->post_handler)
                ctx->probe->post_handler(ctx->probe, condition, regs);
 
-       arm_kmmio_fault_page(ctx->fpage->page, NULL);
+       arm_kmmio_fault_page(ctx->fpage);
 
        regs->flags &= ~X86_EFLAGS_TF;
        regs->flags |= ctx->saved_flags;
@@ -320,19 +342,19 @@ static int add_kmmio_fault_page(unsigned long page)
        f = get_kmmio_fault_page(page);
        if (f) {
                if (!f->count)
-                       arm_kmmio_fault_page(f->page, NULL);
+                       arm_kmmio_fault_page(f);
                f->count++;
                return 0;
        }
 
-       f = kmalloc(sizeof(*f), GFP_ATOMIC);
+       f = kzalloc(sizeof(*f), GFP_ATOMIC);
        if (!f)
                return -1;
 
        f->count = 1;
        f->page = page;
 
-       if (arm_kmmio_fault_page(f->page, NULL)) {
+       if (arm_kmmio_fault_page(f)) {
                kfree(f);
                return -1;
        }
@@ -356,7 +378,7 @@ static void release_kmmio_fault_page(unsigned long page,
        f->count--;
        BUG_ON(f->count < 0);
        if (!f->count) {
-               disarm_kmmio_fault_page(f->page, NULL);
+               disarm_kmmio_fault_page(f);
                f->release_next = *release_list;
                *release_list = f;
        }
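
A side note on the kmalloc() -> kzalloc() change in
add_kmmio_fault_page(): the new old_presence and armed fields must start
out false for a freshly allocated page, and kzalloc() returns zeroed
memory, so no explicit initialization is needed. A minimal user-space
analogue using calloc() is sketched below; the struct layout and the
address are hypothetical, for illustration only.

  #include <stdbool.h>
  #include <stdlib.h>

  struct fault_page {
          int count;
          unsigned long page;
          bool old_presence;
          bool armed;
  };

  int main(void)
  {
          /* calloc(), like kzalloc(), returns zeroed memory: a fresh
           * page starts out unarmed with old_presence == false. */
          struct fault_page *f = calloc(1, sizeof(*f));

          if (!f)
                  return 1;
          f->count = 1;
          f->page = 0xd0000000UL; /* hypothetical MMIO address */
          free(f);
          return 0;
  }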