thp: do_huge_pmd_anonymous_page() cleanup
authorKirill A. Shutemov <kirill.shutemov@linux.intel.com>
Thu, 12 Sep 2013 22:14:03 +0000 (15:14 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Thu, 12 Sep 2013 22:38:03 +0000 (15:38 -0700)
Minor cleanup: unindent most code of the function by inverting one
condition.  It's preparation for the next patch.

No functional changes.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Hillf Danton <dhillf@gmail.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Hugh Dickins <hughd@google.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Matthew Wilcox <willy@linux.intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/huge_memory.c

index 60836870c6f7015e127f4eaf88805f099951a721..6551dd06dd643cd5856b2c65f1e6186e1b34fc7e 100644 (file)
@@ -785,55 +785,54 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
        unsigned long haddr = address & HPAGE_PMD_MASK;
        pte_t *pte;
 
-       if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
-               if (unlikely(anon_vma_prepare(vma)))
-                       return VM_FAULT_OOM;
-               if (unlikely(khugepaged_enter(vma)))
+       if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
+               goto out;
+       if (unlikely(anon_vma_prepare(vma)))
+               return VM_FAULT_OOM;
+       if (unlikely(khugepaged_enter(vma)))
+               return VM_FAULT_OOM;
+       if (!(flags & FAULT_FLAG_WRITE) &&
+                       transparent_hugepage_use_zero_page()) {
+               pgtable_t pgtable;
+               struct page *zero_page;
+               bool set;
+               pgtable = pte_alloc_one(mm, haddr);
+               if (unlikely(!pgtable))
                        return VM_FAULT_OOM;
-               if (!(flags & FAULT_FLAG_WRITE) &&
-                               transparent_hugepage_use_zero_page()) {
-                       pgtable_t pgtable;
-                       struct page *zero_page;
-                       bool set;
-                       pgtable = pte_alloc_one(mm, haddr);
-                       if (unlikely(!pgtable))
-                               return VM_FAULT_OOM;
-                       zero_page = get_huge_zero_page();
-                       if (unlikely(!zero_page)) {
-                               pte_free(mm, pgtable);
-                               count_vm_event(THP_FAULT_FALLBACK);
-                               goto out;
-                       }
-                       spin_lock(&mm->page_table_lock);
-                       set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
-                                       zero_page);
-                       spin_unlock(&mm->page_table_lock);
-                       if (!set) {
-                               pte_free(mm, pgtable);
-                               put_huge_zero_page();
-                       }
-                       return 0;
-               }
-               page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-                                         vma, haddr, numa_node_id(), 0);
-               if (unlikely(!page)) {
+               zero_page = get_huge_zero_page();
+               if (unlikely(!zero_page)) {
+                       pte_free(mm, pgtable);
                        count_vm_event(THP_FAULT_FALLBACK);
                        goto out;
                }
-               count_vm_event(THP_FAULT_ALLOC);
-               if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
-                       put_page(page);
-                       goto out;
-               }
-               if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd,
-                                                         page))) {
-                       mem_cgroup_uncharge_page(page);
-                       put_page(page);
-                       goto out;
+               spin_lock(&mm->page_table_lock);
+               set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
+                               zero_page);
+               spin_unlock(&mm->page_table_lock);
+               if (!set) {
+                       pte_free(mm, pgtable);
+                       put_huge_zero_page();
                }
-
                return 0;
        }
+       page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
+                       vma, haddr, numa_node_id(), 0);
+       if (unlikely(!page)) {
+               count_vm_event(THP_FAULT_FALLBACK);
+               goto out;
+       }
+       count_vm_event(THP_FAULT_ALLOC);
+       if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
+               put_page(page);
+               goto out;
+       }
+       if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page))) {
+               mem_cgroup_uncharge_page(page);
+               put_page(page);
+               goto out;
+       }
+
+       return 0;
 out:
        /*
         * Use __pte_alloc instead of pte_alloc_map, because we can't