mm/page_owner: move page_owner specific function to page_owner.c
author Joonsoo Kim <iamjoonsoo.kim@lge.com>
Fri, 7 Oct 2016 23:58:21 +0000 (16:58 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 8 Oct 2016 01:46:27 +0000 (18:46 -0700)
There is no reason for this page_owner-specific function to reside in
vmstat.c; move it to page_owner.c.

Link: http://lkml.kernel.org/r/1471315879-32294-4-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Reviewed-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/page_owner.h
mm/page_owner.c
mm/vmstat.c

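For orientation: the helper moved by this patch produces the "mixed blocks"
portion of /proc/pagetypeinfo, which counts pageblocks containing pages whose
migratetype differs from the block's own. Viewing that output requires a
kernel built with CONFIG_PAGE_OWNER=y and booted with page_owner=on:

    # cat /proc/pagetypeinfo
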
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index 30583ab0ffb1f8da50d6a3a5cd8e0277af78a304..2be728d156b5555a6331a89798bd4b0bfc2a9562 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -14,6 +14,8 @@ extern void __split_page_owner(struct page *page, unsigned int order);
 extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
 extern void __set_page_owner_migrate_reason(struct page *page, int reason);
 extern void __dump_page_owner(struct page *page);
+extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
+                                       pg_data_t *pgdat, struct zone *zone);
 
 static inline void reset_page_owner(struct page *page, unsigned int order)
 {
diff --git a/mm/page_owner.c b/mm/page_owner.c
index ec6dc1886f71fa2eb77c5629a9a0bc817613ac91..0f4246d109a08622b96b18f9e818064bdedd6e2b 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -8,6 +8,7 @@
 #include <linux/jump_label.h>
 #include <linux/migrate.h>
 #include <linux/stackdepot.h>
+#include <linux/seq_file.h>
 
 #include "internal.h"
 
@@ -214,6 +215,82 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
        __set_bit(PAGE_EXT_OWNER, &new_ext->flags);
 }
 
+void pagetypeinfo_showmixedcount_print(struct seq_file *m,
+                                      pg_data_t *pgdat, struct zone *zone)
+{
+       struct page *page;
+       struct page_ext *page_ext;
+       unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
+       unsigned long end_pfn = pfn + zone->spanned_pages;
+       unsigned long count[MIGRATE_TYPES] = { 0, };
+       int pageblock_mt, page_mt;
+       int i;
+
+       /* Scan block by block. First and last block may be incomplete */
+       pfn = zone->zone_start_pfn;
+
+       /*
+        * Walk the zone in pageblock_nr_pages steps. If a page block spans
+        * a zone boundary, it will be double counted between zones. This does
+        * not matter as the mixed block count will still be correct
+        */
+       for (; pfn < end_pfn; ) {
+               if (!pfn_valid(pfn)) {
+                       pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
+                       continue;
+               }
+
+               block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
+               block_end_pfn = min(block_end_pfn, end_pfn);
+
+               page = pfn_to_page(pfn);
+               pageblock_mt = get_pageblock_migratetype(page);
+
+               for (; pfn < block_end_pfn; pfn++) {
+                       if (!pfn_valid_within(pfn))
+                               continue;
+
+                       page = pfn_to_page(pfn);
+
+                       if (page_zone(page) != zone)
+                               continue;
+
+                       if (PageBuddy(page)) {
+                               pfn += (1UL << page_order(page)) - 1;
+                               continue;
+                       }
+
+                       if (PageReserved(page))
+                               continue;
+
+                       page_ext = lookup_page_ext(page);
+                       if (unlikely(!page_ext))
+                               continue;
+
+                       if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
+                               continue;
+
+                       page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
+                       if (pageblock_mt != page_mt) {
+                               if (is_migrate_cma(pageblock_mt))
+                                       count[MIGRATE_MOVABLE]++;
+                               else
+                                       count[pageblock_mt]++;
+
+                               pfn = block_end_pfn;
+                               break;
+                       }
+                       pfn += (1UL << page_ext->order) - 1;
+               }
+       }
+
+       /* Print counts */
+       seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
+       for (i = 0; i < MIGRATE_TYPES; i++)
+               seq_printf(m, "%12lu ", count[i]);
+       seq_putc(m, '\n');
+}
+
 static ssize_t
 print_page_owner(char __user *buf, size_t count, unsigned long pfn,
                struct page *page, struct page_ext *page_ext,
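The seq_printf() calls at the end of the moved function emit one line per
zone: node id, zone name, then one counter per migratetype ("Node %d, zone
%8s " followed by "%12lu " per count). An illustrative line for a single-node
machine with six migratetypes (the counts here are made up):

    Node 0, zone   Normal            2            0            5            0            0            0
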
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 89cec42d19ffa8da5ad1e3c8e64ff4df1b3e562b..dc04e76c79505fd67f1dc7da481781dbba95fed5 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1254,85 +1254,6 @@ static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
        return 0;
 }
 
-#ifdef CONFIG_PAGE_OWNER
-static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
-                                                       pg_data_t *pgdat,
-                                                       struct zone *zone)
-{
-       struct page *page;
-       struct page_ext *page_ext;
-       unsigned long pfn = zone->zone_start_pfn, block_end_pfn;
-       unsigned long end_pfn = pfn + zone->spanned_pages;
-       unsigned long count[MIGRATE_TYPES] = { 0, };
-       int pageblock_mt, page_mt;
-       int i;
-
-       /* Scan block by block. First and last block may be incomplete */
-       pfn = zone->zone_start_pfn;
-
-       /*
-        * Walk the zone in pageblock_nr_pages steps. If a page block spans
-        * a zone boundary, it will be double counted between zones. This does
-        * not matter as the mixed block count will still be correct
-        */
-       for (; pfn < end_pfn; ) {
-               if (!pfn_valid(pfn)) {
-                       pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
-                       continue;
-               }
-
-               block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
-               block_end_pfn = min(block_end_pfn, end_pfn);
-
-               page = pfn_to_page(pfn);
-               pageblock_mt = get_pageblock_migratetype(page);
-
-               for (; pfn < block_end_pfn; pfn++) {
-                       if (!pfn_valid_within(pfn))
-                               continue;
-
-                       page = pfn_to_page(pfn);
-
-                       if (page_zone(page) != zone)
-                               continue;
-
-                       if (PageBuddy(page)) {
-                               pfn += (1UL << page_order(page)) - 1;
-                               continue;
-                       }
-
-                       if (PageReserved(page))
-                               continue;
-
-                       page_ext = lookup_page_ext(page);
-                       if (unlikely(!page_ext))
-                               continue;
-
-                       if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
-                               continue;
-
-                       page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
-                       if (pageblock_mt != page_mt) {
-                               if (is_migrate_cma(pageblock_mt))
-                                       count[MIGRATE_MOVABLE]++;
-                               else
-                                       count[pageblock_mt]++;
-
-                               pfn = block_end_pfn;
-                               break;
-                       }
-                       pfn += (1UL << page_ext->order) - 1;
-               }
-       }
-
-       /* Print counts */
-       seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
-       for (i = 0; i < MIGRATE_TYPES; i++)
-               seq_printf(m, "%12lu ", count[i]);
-       seq_putc(m, '\n');
-}
-#endif /* CONFIG_PAGE_OWNER */
-
 /*
  * Print out the number of pageblocks for each migratetype that contain pages
  * of other types. This gives an indication of how well fallbacks are being
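With the helper exported, the remaining vmstat.c caller only has to gate on
page_owner and walk the node's zones. A minimal sketch of what that call site
looks like after this patch, assuming vmstat's walk_zones_in_node() helper and
the page_owner_inited static key from this era of the tree (a reconstruction
for context, not part of the hunks shown above):

    static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
    {
    #ifdef CONFIG_PAGE_OWNER
            /* page_owner records nothing unless booted with page_owner=on */
            if (!static_branch_unlikely(&page_owner_inited))
                    return;

            /* flush per-cpu free lists so PageBuddy reflects a settled state */
            drain_all_pages(NULL);

            /* a header line naming each migratetype would be printed here */
            walk_zones_in_node(m, pgdat, pagetypeinfo_showmixedcount_print);
    #endif /* CONFIG_PAGE_OWNER */
    }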