metag: hugetlb: convert to vm_unmapped_area()
author	James Hogan <james.hogan@imgtec.com>
	Mon, 11 Feb 2013 17:28:10 +0000 (17:28 +0000)
committer	James Hogan <james.hogan@imgtec.com>
	Sat, 2 Mar 2013 20:11:13 +0000 (20:11 +0000)
Convert hugetlb_get_unmapped_area_new_pmd() to use vm_unmapped_area()
rather than searching the virtual address space itself. This fixes the
following errors in linux-next, caused by the members named below being
removed after the other architectures had already been converted:

arch/metag/mm/hugetlbpage.c: In function 'hugetlb_get_unmapped_area_new_pmd':
arch/metag/mm/hugetlbpage.c:199: error: 'struct mm_struct' has no member named 'cached_hole_size'
arch/metag/mm/hugetlbpage.c:200: error: 'struct mm_struct' has no member named 'free_area_cache'
arch/metag/mm/hugetlbpage.c:215: error: 'struct mm_struct' has no member named 'cached_hole_size'
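
For reference, vm_unmapped_area() takes a struct vm_unmapped_area_info
describing the request and performs the gap search that the removed
open-coded loop did by hand. A minimal sketch of the interface as
declared in <linux/mm.h> at the time of the conversion (the field list
matches the hunk below; the comments are illustrative, not quoted from
the header):

struct vm_unmapped_area_info {
#define VM_UNMAPPED_AREA_TOPDOWN 1
	unsigned long flags;        /* 0 = bottom-up, TOPDOWN = top-down */
	unsigned long length;       /* length of the mapping to place */
	unsigned long low_limit;    /* lowest acceptable address */
	unsigned long high_limit;   /* highest acceptable end address */
	unsigned long align_mask;   /* address bits that must match align_offset */
	unsigned long align_offset;
};

unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);

On failure it returns a negative errno (-ENOMEM) rather than 0, which
is why the -EINVAL fallback in the second hunk can simply be dropped.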

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Acked-by: Michel Lespinasse <walken@google.com>
arch/metag/mm/hugetlbpage.c

index 24ceed4f4eedbc2f60978815ab62f46426e245f2..3c52fa6d0f8e24030294fecacc26498f5de9ffe5 100644
--- a/arch/metag/mm/hugetlbpage.c
+++ b/arch/metag/mm/hugetlbpage.c
@@ -192,43 +192,15 @@ new_search:
 static unsigned long
 hugetlb_get_unmapped_area_new_pmd(unsigned long len)
 {
-       struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
-       unsigned long start_addr, addr;
-
-       if (ALIGN_HUGEPT(len) > mm->cached_hole_size)
-               start_addr = mm->free_area_cache;
-       else
-               start_addr = TASK_UNMAPPED_BASE;
-
-new_search:
-       addr = ALIGN_HUGEPT(start_addr);
-
-       for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-               if (TASK_SIZE - len < addr) {
-                       /*
-                        * Start a new search - just in case we missed
-                        * some holes.
-                        */
-                       if (start_addr != TASK_UNMAPPED_BASE) {
-                               start_addr = TASK_UNMAPPED_BASE;
-                               mm->cached_hole_size = 0;
-                               goto new_search;
-                       }
-                       return 0;
-               }
-               /* skip ahead if we've aligned right over some vmas */
-               if (vma && vma->vm_end <= addr)
-                       continue;
-               if (!vma || ALIGN_HUGEPT(addr + len) <= vma->vm_start) {
-#if HPAGE_SHIFT < HUGEPT_SHIFT
-                       if (len & HUGEPT_MASK)
-                               mm->context.part_huge = addr + len;
-#endif
-                       return addr;
-               }
-               addr = ALIGN_HUGEPT(vma->vm_end);
-       }
+       struct vm_unmapped_area_info info;
+
+       info.flags = 0;
+       info.length = len;
+       info.low_limit = TASK_UNMAPPED_BASE;
+       info.high_limit = TASK_SIZE;
+       info.align_mask = PAGE_MASK & HUGEPT_MASK;
+       info.align_offset = 0;
+       return vm_unmapped_area(&info);
 }
 
 unsigned long
@@ -266,11 +238,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
         * Find an unmapped naturally aligned set of 4MB blocks that we can use
         * for huge pages.
         */
-       addr = hugetlb_get_unmapped_area_new_pmd(len);
-       if (likely(addr))
-               return addr;
-
-       return -EINVAL;
+       return hugetlb_get_unmapped_area_new_pmd(len);
 }
 
 #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
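
A note on the align_mask value used above, assuming 4 KiB pages and a
4 MiB second-level page table span with HUGEPT_MASK == HUGEPT_SIZE - 1
(assumptions about the metag definitions, not taken from this patch):

/*
 * Assumed values, for illustration only:
 *   PAGE_MASK   = ~(4096 - 1)             = 0xfffff000
 *   HUGEPT_MASK = (4 << 20) - 1           = 0x003fffff
 *   align_mask  = PAGE_MASK & HUGEPT_MASK = 0x003ff000
 *
 * vm_unmapped_area() only returns addresses where
 *   (addr & align_mask) == (align_offset & align_mask) == 0,
 * so every address bit between the page size and 4 MiB is clear and
 * page-aligned results are naturally 4 MiB aligned - the same effect
 * as the old ALIGN_HUGEPT() stepping, matching the "naturally aligned
 * set of 4MB blocks" comment in the second hunk.
 */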