mm, thp: count thp_fault_fallback anytime thp fault fails
author David Rientjes <rientjes@google.com>
Thu, 12 Sep 2013 22:14:06 +0000 (15:14 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 12 Sep 2013 22:38:03 +0000 (15:38 -0700)
Currently, thp_fault_fallback in vmstat is only incremented when the
hugepage allocation itself fails.  If current's memcg hits its limit or
the page fault handler returns an error, the fault is still incorrectly
accounted as a successful thp_fault_alloc.

Count thp_fault_fallback anytime the page fault handler falls back to
using regular pages, and count thp_fault_alloc only when a hugepage has
actually been faulted.
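
Both events are exported to userspace through /proc/vmstat as
thp_fault_alloc and thp_fault_fallback, so the fix is directly
observable there.  A minimal sketch of a reader, assuming a kernel
built with CONFIG_TRANSPARENT_HUGEPAGE (error handling kept thin):

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/vmstat", "r");
	char line[256];

	if (!f) {
		perror("fopen /proc/vmstat");
		return 1;
	}
	/* Print only the two THP fault counters this patch touches. */
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "thp_fault_alloc", 15) ||
		    !strncmp(line, "thp_fault_fallback", 18))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}

With the patch applied, a THP fault that fails the memcg charge or the
final mapping step shows up as thp_fault_fallback rather than as a
successful thp_fault_alloc.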

Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 243f4cc757775533f369a1d8acaee7d12c607e53..f60c4ebaa30c6147dafb4ba650f388c62a47edc4 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -820,17 +820,19 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                count_vm_event(THP_FAULT_FALLBACK);
                return VM_FAULT_FALLBACK;
        }
-       count_vm_event(THP_FAULT_ALLOC);
        if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
                put_page(page);
+               count_vm_event(THP_FAULT_FALLBACK);
                return VM_FAULT_FALLBACK;
        }
        if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page))) {
                mem_cgroup_uncharge_page(page);
                put_page(page);
+               count_vm_event(THP_FAULT_FALLBACK);
                return VM_FAULT_FALLBACK;
        }
 
+       count_vm_event(THP_FAULT_ALLOC);
        return 0;
 }
 
@@ -1143,7 +1145,6 @@ alloc:
                new_page = NULL;
 
        if (unlikely(!new_page)) {
-               count_vm_event(THP_FAULT_FALLBACK);
                if (is_huge_zero_pmd(orig_pmd)) {
                        ret = do_huge_pmd_wp_zero_page_fallback(mm, vma,
                                        address, pmd, orig_pmd, haddr);
@@ -1154,9 +1155,9 @@ alloc:
                                split_huge_page(page);
                        put_page(page);
                }
+               count_vm_event(THP_FAULT_FALLBACK);
                goto out;
        }
-       count_vm_event(THP_FAULT_ALLOC);
 
        if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
                put_page(new_page);
@@ -1164,10 +1165,13 @@ alloc:
                        split_huge_page(page);
                        put_page(page);
                }
+               count_vm_event(THP_FAULT_FALLBACK);
                ret |= VM_FAULT_OOM;
                goto out;
        }
 
+       count_vm_event(THP_FAULT_ALLOC);
+
        if (is_huge_zero_pmd(orig_pmd))
                clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
        else