arch/powerpc/mm/subpage-prot.c: use walk->vma and walk_page_vma()
Author:     Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
AuthorDate: Wed, 11 Feb 2015 23:28:00 +0000 (15:28 -0800)
Commit:     Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Thu, 12 Feb 2015 01:06:06 +0000 (17:06 -0800)
We don't have to use mm_walk->private to pass the vma to the callback function,
because mm_walk->vma already provides it.  And walk_page_vma() is the natural
interface when walking a single vma.

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/powerpc/mm/subpage-prot.c

index 6c0b1f5f8d2ccef746ccddfed70684c45f3adec2..fa9fb5b4c66cf8b29fe550b4795fb4a3ec3aeeb7 100644
@@ -134,7 +134,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
 static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
                                  unsigned long end, struct mm_walk *walk)
 {
-       struct vm_area_struct *vma = walk->private;
+       struct vm_area_struct *vma = walk->vma;
        split_huge_page_pmd(vma, addr, pmd);
        return 0;
 }
@@ -163,9 +163,7 @@ static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
                if (vma->vm_start >= (addr + len))
                        break;
                vma->vm_flags |= VM_NOHUGEPAGE;
-               subpage_proto_walk.private = vma;
-               walk_page_range(vma->vm_start, vma->vm_end,
-                               &subpage_proto_walk);
+               walk_page_vma(vma, &subpage_proto_walk);
                vma = vma->vm_next;
        }
 }
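
For reference, here is a minimal sketch of how the two touched functions read
once the patch is applied.  The subpage_proto_walk initializer and the
find_vma() lookup are not visible in the hunks above and are reconstructed from
context (the v4.0-era pagewalk API, where the callbacks live directly in
struct mm_walk), so treat those parts as an approximation rather than verbatim
source.

static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
				  unsigned long end, struct mm_walk *walk)
{
	/* walk->vma is filled in by the page walker, so ->private is unused */
	struct vm_area_struct *vma = walk->vma;

	split_huge_page_pmd(vma, addr, pmd);
	return 0;
}

static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
{
	struct vm_area_struct *vma;
	struct mm_walk subpage_proto_walk = {
		.mm = mm,
		.pmd_entry = subpage_walk_pmd_entry,
	};

	/* Mark every vma overlapping [addr, addr + len) and split its THPs */
	vma = find_vma(mm, addr);
	while (vma) {
		if (vma->vm_start >= (addr + len))
			break;
		vma->vm_flags |= VM_NOHUGEPAGE;
		/*
		 * walk_page_vma() walks exactly one vma and sets walk->vma
		 * before invoking the callbacks.
		 */
		walk_page_vma(vma, &subpage_proto_walk);
		vma = vma->vm_next;
	}
}

Since walk_page_vma() fills in walk->vma before calling the callbacks, the
caller no longer needs to smuggle the vma through mm_walk->private.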