mm, hugetlb: move the error handling logic out of the normal code path
author Jianyu Zhan <nasa4836@gmail.com>
Wed, 4 Jun 2014 23:10:36 +0000 (16:10 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 4 Jun 2014 23:54:10 +0000 (16:54 -0700)
alloc_huge_page() currently mixes the normal code path with the error
handling logic.  This patch moves the error handling out of the normal
path, making the normal path cleaner and reducing code duplication.
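
For reference, the change adopts the kernel's conventional goto-unwind
pattern: each failure jumps to a label that releases only what has been
acquired so far, in reverse order, so every release call appears exactly
once.  A minimal stand-alone sketch of the pattern (the acquire_*/release_*
helpers below are hypothetical stand-ins, not the hugetlb functions
themselves):

    #include <stdio.h>

    /* Hypothetical resources standing in for the subpool and cgroup charges. */
    static int acquire_a(void) { return 0; }
    static int acquire_b(void) { return 0; }
    static int acquire_c(void) { return -1; }  /* fail here to exercise unwind */
    static void release_a(void) { puts("release_a"); }
    static void release_b(void) { puts("release_b"); }

    static int setup_resources(void)
    {
            int ret;

            ret = acquire_a();
            if (ret)
                    return ret;             /* nothing acquired yet */

            ret = acquire_b();
            if (ret)
                    goto out_release_a;

            ret = acquire_c();
            if (ret)
                    goto out_release_b;

            return 0;                       /* success: nothing to unwind */

    out_release_b:
            release_b();                    /* undo in reverse order */
    out_release_a:
            release_a();
            return ret;
    }

    int main(void)
    {
            return setup_resources() ? 1 : 0;
    }

Running it prints "release_b" then "release_a": the fall-through labels
release resources in the reverse of the order they were taken, which is
exactly what the out_uncharge_cgroup/out_subpool_put labels do below.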

Signed-off-by: Jianyu Zhan <nasa4836@gmail.com>
Acked-by: Davidlohr Bueso <davidlohr@hp.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/hugetlb.c

index 98f0bc105dfe196b148b392d7cf5add3995ee0c2..244194217e39acac9aee793259c05759f59a8131 100644
@@ -1386,24 +1386,17 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
                        return ERR_PTR(-ENOSPC);
 
        ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
-       if (ret) {
-               if (chg || avoid_reserve)
-                       hugepage_subpool_put_pages(spool, 1);
-               return ERR_PTR(-ENOSPC);
-       }
+       if (ret)
+               goto out_subpool_put;
+
        spin_lock(&hugetlb_lock);
        page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg);
        if (!page) {
                spin_unlock(&hugetlb_lock);
                page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
-               if (!page) {
-                       hugetlb_cgroup_uncharge_cgroup(idx,
-                                                      pages_per_huge_page(h),
-                                                      h_cg);
-                       if (chg || avoid_reserve)
-                               hugepage_subpool_put_pages(spool, 1);
-                       return ERR_PTR(-ENOSPC);
-               }
+               if (!page)
+                       goto out_uncharge_cgroup;
+
                spin_lock(&hugetlb_lock);
                list_move(&page->lru, &h->hugepage_activelist);
                /* Fall through */
@@ -1415,6 +1408,13 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 
        vma_commit_reservation(h, vma, addr);
        return page;
+
+out_uncharge_cgroup:
+       hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
+out_subpool_put:
+       if (chg || avoid_reserve)
+               hugepage_subpool_put_pages(spool, 1);
+       return ERR_PTR(-ENOSPC);
 }
 
 /*