sh: vmalloc pgtable sync fix.
author Stuart Menefy <stuart.menefy@st.com>
Fri, 5 Sep 2008 07:17:15 +0000 (16:17 +0900)
committer Paul Mundt <lethal@linux-sh.org>
Mon, 8 Sep 2008 01:35:04 +0000 (10:35 +0900)
This fixes a problem in the code which copies the vmalloc portion of the
kernel's page table into the current user space page table. The addition
of the four level page table code breaks this on folded page tables,
because the pud level is always present (although folded), so the old
check bailed out to bad_area_nosemaphore instead of performing the copy.
This updates the code to use the same style of update for the pud as is
used for the pgd level.

Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
arch/sh/mm/fault_32.c

index 0c776fdfbddae6800480dc0a3dc7be54103509b3..e8efda9846bb3d5204d5df517247a2b783cf0933 100644
@@ -61,7 +61,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
                pgd = get_TTB() + offset;
                pgd_k = swapper_pg_dir + offset;
 
-               /* This will never happen with the folded page table. */
                if (!pgd_present(*pgd)) {
                        if (!pgd_present(*pgd_k))
                                goto bad_area_nosemaphore;
@@ -71,9 +70,13 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 
                pud = pud_offset(pgd, address);
                pud_k = pud_offset(pgd_k, address);
-               if (pud_present(*pud) || !pud_present(*pud_k))
-                       goto bad_area_nosemaphore;
-               set_pud(pud, *pud_k);
+
+               if (!pud_present(*pud)) {
+                       if (!pud_present(*pud_k))
+                               goto bad_area_nosemaphore;
+                       set_pud(pud, *pud_k);
+                       return;
+               }
 
                pmd = pmd_offset(pud, address);
                pmd_k = pmd_offset(pud_k, address);
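
For reference, a minimal sketch of how the synced walk reads after this
change. The pgd and pud steps mirror the hunks above; anything not visible
in those hunks (the declarations, the offset computation, the tail of the
pgd branch, and the trailing pmd step) is a reconstructed assumption rather
than part of this diff:

	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;
	int offset = pgd_index(address);	/* assumed, as in the patched function */

	pgd = get_TTB() + offset;		/* this task's page table */
	pgd_k = swapper_pg_dir + offset;	/* kernel reference page table */

	/* If this level is missing, copy the kernel's entry and return:
	 * the vmalloc fault is resolved once the entry is visible. */
	if (!pgd_present(*pgd)) {
		if (!pgd_present(*pgd_k))
			goto bad_area_nosemaphore;
		set_pgd(pgd, *pgd_k);	/* assumed tail of the pgd branch */
		return;
	}

	/* Same pattern for the pud; with folded page tables
	 * pud_present(*pud) is always true, so this block is skipped. */
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);

	if (!pud_present(*pud)) {
		if (!pud_present(*pud_k))
			goto bad_area_nosemaphore;
		set_pud(pud, *pud_k);
		return;
	}

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	/* pmd (and pte) checks continue here, outside the hunks shown. */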