mm/page_alloc: move freepage counting logic to __free_one_page()
author Joonsoo Kim <iamjoonsoo.kim@lge.com>
Thu, 13 Nov 2014 23:19:18 +0000 (15:19 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 14 Nov 2014 00:17:05 +0000 (16:17 -0800)
All the callers of __free_one_page() have similar freepage counting logic,
so we can move it into __free_one_page().  This reduces the line count and
helps future maintenance.
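
A simplified sketch of the consolidated counting (buddy merging and
freelist handling elided; not the full function):

	static inline void __free_one_page(struct page *page, unsigned long pfn,
					struct zone *zone, unsigned int order,
					int migratetype)
	{
		VM_BUG_ON(migratetype == -1);

		/* Count the freed pages here, once, for every caller. */
		if (!is_migrate_isolate(migratetype))
			__mod_zone_freepage_state(zone, 1 << order, migratetype);

		/* ... buddy merging and freelist insertion as before ... */
	}

Callers such as free_pcppages_bulk() and free_one_page() now only resolve
the migratetype and call __free_one_page(); their skip_counting labels go
away.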

This is also a preparation step for "mm/page_alloc: restrict max order of
merging on isolated pageblock", which fixes the freepage counting problem
for freepages larger than pageblock order.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Cc: Tang Chen <tangchen@cn.fujitsu.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Laura Abbott <lauraa@codeaurora.org>
Cc: Heesub Shin <heesub.shin@samsung.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Ritesh Harjani <ritesh.list@gmail.com>
Cc: Gioh Kim <gioh.kim@lge.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/page_alloc.c

index 58923bea0d8b5197d7b007da3edd4287c102b970..9f689f16b5aaac84cbce41cedad01521bc448ad3 100644
@@ -577,6 +577,8 @@ static inline void __free_one_page(struct page *page,
                        return;
 
        VM_BUG_ON(migratetype == -1);
+       if (!is_migrate_isolate(migratetype))
+               __mod_zone_freepage_state(zone, 1 << order, migratetype);
 
        page_idx = pfn & ((1 << MAX_ORDER) - 1);
 
@@ -715,14 +717,9 @@ static void free_pcppages_bulk(struct zone *zone, int count,
                        /* must delete as __free_one_page list manipulates */
                        list_del(&page->lru);
                        mt = get_freepage_migratetype(page);
-                       if (unlikely(has_isolate_pageblock(zone))) {
+                       if (unlikely(has_isolate_pageblock(zone)))
                                mt = get_pageblock_migratetype(page);
-                               if (is_migrate_isolate(mt))
-                                       goto skip_counting;
-                       }
-                       __mod_zone_freepage_state(zone, 1, mt);
 
-skip_counting:
                        /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
                        __free_one_page(page, page_to_pfn(page), zone, 0, mt);
                        trace_mm_page_pcpu_drain(page, 0, mt);
@@ -745,12 +742,7 @@ static void free_one_page(struct zone *zone,
        if (unlikely(has_isolate_pageblock(zone) ||
                is_migrate_isolate(migratetype))) {
                migratetype = get_pfnblock_migratetype(page, pfn);
-               if (is_migrate_isolate(migratetype))
-                       goto skip_counting;
        }
-       __mod_zone_freepage_state(zone, 1 << order, migratetype);
-
-skip_counting:
        __free_one_page(page, pfn, zone, order, migratetype);
        spin_unlock(&zone->lock);
 }