mm: hugetlb: defer freeing pages when gathering surplus pages
author Hillf Danton <dhillf@gmail.com>
Wed, 21 Mar 2012 23:34:00 +0000 (16:34 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 22 Mar 2012 00:54:57 +0000 (17:54 -0700)
When gathering surplus pages, the number of needed pages is recomputed
after the hugetlb lock is reacquired, in order to catch changes in
resv_huge_pages and free_huge_pages.  The recomputation also takes the
number of newly allocated pages into account.

Freeing the newly allocated pages can therefore be deferred until after
that recomputation, to see whether the final page request is satisfied
even though fewer pages than requested may have been allocated.
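
The same produce-then-reconcile idea can be illustrated outside the
kernel.  Below is a rough, self-contained userspace C sketch, not kernel
code: the names (try_alloc(), demand, available, pool) are hypothetical,
and the only point is that a partial allocation is kept until the demand
is recomputed, and freed only if the recomputed demand still cannot be
met.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
        struct item *next;
};

static struct item *pool;        /* items gathered so far            */
static int demand = 8;           /* total number of items required   */
static int available;            /* items that became free elsewhere */

/* Simulate an allocator that fails partway through the batch. */
static struct item *try_alloc(int nr)
{
        if (nr == 5)
                return NULL;
        return malloc(sizeof(struct item));
}

int main(void)
{
        bool alloc_ok = true;
        int allocated = 0;
        int needed = demand - available;
        int i;

retry:
        for (i = 0; i < needed; i++) {
                struct item *it = try_alloc(allocated + i);

                if (!it) {
                        alloc_ok = false;       /* remember the failure...  */
                        break;                  /* ...but do not free yet   */
                }
                it->next = pool;
                pool = it;
        }
        allocated += i;                         /* count what we really got */

        /* Recompute the demand; 'available' may have grown meanwhile. */
        available += 3;
        needed = demand - (available + allocated);
        if (needed > 0) {
                if (alloc_ok)
                        goto retry;
                /* Allocation failed and demand is still unmet: give up. */
                goto free_all;
        }

        /* Gathered items stay in 'pool', like pages added to the hstate. */
        printf("demand satisfied with %d allocated items\n", allocated);
        return 0;

free_all:
        while (pool) {
                struct item *it = pool;

                pool = it->next;
                free(it);
        }
        printf("demand not satisfied, freed %d items\n", allocated);
        return 1;
}

With the simulated failure above, only 5 of the 8 requested items are
allocated, yet the recomputed demand is already met by what was gathered
plus what became available in the meantime, so nothing is freed.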

Signed-off-by: Hillf Danton <dhillf@gmail.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/hugetlb.c

index a876871f6be56d15b06d1da56a9f3dbbd61cda91..afe3e1ff919b150c33344a2acfa9f80d8d54b765 100644
@@ -852,6 +852,7 @@ static int gather_surplus_pages(struct hstate *h, int delta)
        struct page *page, *tmp;
        int ret, i;
        int needed, allocated;
+       bool alloc_ok = true;
 
        needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
        if (needed <= 0) {
@@ -867,17 +868,13 @@ retry:
        spin_unlock(&hugetlb_lock);
        for (i = 0; i < needed; i++) {
                page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
-               if (!page)
-                       /*
-                        * We were not able to allocate enough pages to
-                        * satisfy the entire reservation so we free what
-                        * we've allocated so far.
-                        */
-                       goto free;
-
+               if (!page) {
+                       alloc_ok = false;
+                       break;
+               }
                list_add(&page->lru, &surplus_list);
        }
-       allocated += needed;
+       allocated += i;
 
        /*
         * After retaking hugetlb_lock, we need to recalculate 'needed'
@@ -886,9 +883,16 @@ retry:
        spin_lock(&hugetlb_lock);
        needed = (h->resv_huge_pages + delta) -
                        (h->free_huge_pages + allocated);
-       if (needed > 0)
-               goto retry;
-
+       if (needed > 0) {
+               if (alloc_ok)
+                       goto retry;
+               /*
+                * We were not able to allocate enough pages to
+                * satisfy the entire reservation so we free what
+                * we've allocated so far.
+                */
+               goto free;
+       }
        /*
         * The surplus_list now contains _at_least_ the number of extra pages
         * needed to accommodate the reservation.  Add the appropriate number
@@ -914,10 +918,10 @@ retry:
                VM_BUG_ON(page_count(page));
                enqueue_huge_page(h, page);
        }
+free:
        spin_unlock(&hugetlb_lock);
 
        /* Free unnecessary surplus pages to the buddy allocator */
-free:
        if (!list_empty(&surplus_list)) {
                list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
                        list_del(&page->lru);