mm/pagewalk.c: walk_page_range should avoid VM_PFNMAP areas
author    Cliff Wickman <cpw@sgi.com>
          Fri, 24 May 2013 22:55:36 +0000 (15:55 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 24 May 2013 23:22:53 +0000 (16:22 -0700)
A panic can be caused by simply cat'ing /proc/<pid>/smaps while an
application has a VM_PFNMAP range.  It happened in-house when a
benchmarker was trying to decipher the memory layout of his program.

/proc/<pid>/smaps and similar walks through a user page table should not
be looking at VM_PFNMAP areas.

Certain tests in walk_page_range() (specifically split_huge_page_pmd())
assume that all of the mapped PFNs are backed by page structures, which
is usually not true for VM_PFNMAP areas.  Dereferencing those
nonexistent page structures can therefore panic the kernel with a page
fault.
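
To make the failure mode concrete, here is a minimal sketch
(illustration only; the helper name is hypothetical and this is not the
exact kernel path) of what that bad dereference looks like:

    #include <linux/mm.h>

    /*
     * Hypothetical helper: roughly what goes wrong when
     * struct-page-based code such as split_huge_page_pmd() runs
     * against a VM_PFNMAP mapping.
     */
    static void pfnmap_deref_sketch(pmd_t *pmd)
    {
            /*
             * pmd_page() is essentially pfn_to_page() on the PFN in
             * the entry.  A VM_PFNMAP PFN has no struct page behind
             * it, so this pointer lands outside the memmap...
             */
            struct page *page = pmd_page(*pmd);

            /* ...and dereferencing it faults in the kernel. */
            if (PageReserved(page))
                    return;
    }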

There are a half dozen callers of walk_page_range() that walk through a
task's entire page table (as N. Horiguchi pointed out).  So rather than
change all of them, this patch changes just walk_page_range() to ignore
VM_PFNMAP areas.
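
For reference, a typical full-table caller looks roughly like this (a
minimal sketch against the struct mm_walk API of this era; the function
names are made up).  With this patch, every such caller is protected in
one place:

    #include <linux/mm.h>
    #include <linux/sched.h>

    static int sketch_pmd_entry(pmd_t *pmd, unsigned long addr,
                                unsigned long end, struct mm_walk *walk)
    {
            /* per-PMD work for the walker would go here */
            return 0;
    }

    static void sketch_walk_task(struct mm_struct *mm)
    {
            struct mm_walk walk = {
                    .pmd_entry = sketch_pmd_entry,
                    .mm        = mm,
            };

            down_read(&mm->mmap_sem);
            /* with this patch, VM_PFNMAP ranges are skipped internally */
            walk_page_range(0, TASK_SIZE, &walk);
            up_read(&mm->mmap_sem);
    }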

The logic of hugetlb_vma() is moved back into walk_page_range(), as we
want to test any vma in the range.

VM_PFNMAP areas (typically created with remap_pfn_range(); a sketch
follows the list) are used by:
- graphics memory manager   gpu/drm/drm_gem.c
- global reference unit     sgi-gru/grufile.c
- sgi special memory        char/mspec.c
- and probably several out-of-tree modules
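
As promised above, a minimal sketch of how a driver typically creates
such an area (the mmap handler and the PFN value here are
hypothetical):

    #include <linux/fs.h>
    #include <linux/mm.h>

    static int sketch_mmap(struct file *file, struct vm_area_struct *vma)
    {
            unsigned long pfn = 0x80000;    /* hypothetical device PFN */

            /*
             * remap_pfn_range() marks the vma VM_PFNMAP: its PFNs map
             * device or special memory that has no struct page backing.
             */
            return remap_pfn_range(vma, vma->vm_start, pfn,
                                   vma->vm_end - vma->vm_start,
                                   vma->vm_page_prot);
    }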

[akpm@linux-foundation.org: remove now-unused hugetlb_vma() stub]
Signed-off-by: Cliff Wickman <cpw@sgi.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: David Sterba <dsterba@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@gmail.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 35aa294656cd812d2773fede0d51179a8b7ee09a..5da2cbcfdbb56b0e9f4fe27d6e04137e59dfce3b 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -127,28 +127,7 @@ static int walk_hugetlb_range(struct vm_area_struct *vma,
        return 0;
 }
 
-static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
-{
-       struct vm_area_struct *vma;
-
-       /* We don't need vma lookup at all. */
-       if (!walk->hugetlb_entry)
-               return NULL;
-
-       VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
-       vma = find_vma(walk->mm, addr);
-       if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma))
-               return vma;
-
-       return NULL;
-}
-
 #else /* CONFIG_HUGETLB_PAGE */
-static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
-{
-       return NULL;
-}
-
 static int walk_hugetlb_range(struct vm_area_struct *vma,
                              unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
@@ -198,30 +177,53 @@ int walk_page_range(unsigned long addr, unsigned long end,
        if (!walk->mm)
                return -EINVAL;
 
+       VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
+
        pgd = pgd_offset(walk->mm, addr);
        do {
-               struct vm_area_struct *vma;
+               struct vm_area_struct *vma = NULL;
 
                next = pgd_addr_end(addr, end);
 
                /*
-                * handle hugetlb vma individually because pagetable walk for
-                * the hugetlb page is dependent on the architecture and
-                * we can't handled it in the same manner as non-huge pages.
+                * This function was not intended to be vma based.
+                * But there are vma special cases to be handled:
+                * - hugetlb vma's
+                * - VM_PFNMAP vma's
                 */
-               vma = hugetlb_vma(addr, walk);
+               vma = find_vma(walk->mm, addr);
                if (vma) {
-                       if (vma->vm_end < next)
+                       /*
+                        * There are no page structures backing a VM_PFNMAP
+                        * range, so do not allow split_huge_page_pmd().
+                        */
+                       if ((vma->vm_start <= addr) &&
+                           (vma->vm_flags & VM_PFNMAP)) {
                                next = vma->vm_end;
+                               pgd = pgd_offset(walk->mm, next);
+                               continue;
+                       }
                        /*
-                        * Hugepage is very tightly coupled with vma, so
-                        * walk through hugetlb entries within a given vma.
+                        * Handle hugetlb vma individually because pagetable
+                        * walk for the hugetlb page is dependent on the
+                        * architecture and we can't handle it in the same
+                        * manner as non-huge pages.
                         */
-                       err = walk_hugetlb_range(vma, addr, next, walk);
-                       if (err)
-                               break;
-                       pgd = pgd_offset(walk->mm, next);
-                       continue;
+                       if (walk->hugetlb_entry && (vma->vm_start <= addr) &&
+                           is_vm_hugetlb_page(vma)) {
+                               if (vma->vm_end < next)
+                                       next = vma->vm_end;
+                               /*
+                                * Hugepage is very tightly coupled with vma,
+                                * so walk through hugetlb entries within a
+                                * given vma.
+                                */
+                               err = walk_hugetlb_range(vma, addr, next, walk);
+                               if (err)
+                                       break;
+                               pgd = pgd_offset(walk->mm, next);
+                               continue;
+                       }
                }
 
                if (pgd_none_or_clear_bad(pgd)) {