Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec
diff --git a/mm/compaction.c b/mm/compaction.c
index 694eaabaaebdc0827c93d81c97f200fc534a115e..c62bd063d766c7333ca0370444636a52ddc70802 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
 #include <linux/backing-dev.h>
 #include <linux/sysctl.h>
 #include <linux/sysfs.h>
+#include <linux/balloon_compaction.h>
 #include "internal.h"
 
+#ifdef CONFIG_COMPACTION
+static inline void count_compact_event(enum vm_event_item item)
+{
+       count_vm_event(item);
+}
+
+static inline void count_compact_events(enum vm_event_item item, long delta)
+{
+       count_vm_events(item, delta);
+}
+#else
+#define count_compact_event(item) do { } while (0)
+#define count_compact_events(item, delta) do { } while (0)
+#endif
+
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
 
 #define CREATE_TRACE_POINTS
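The count_compact_event()/count_compact_events() wrappers added above let the scanning helpers shared with CMA bump compaction counters even in CONFIG_CMA=y && CONFIG_COMPACTION=n builds: the #else variants are macros whose argument is never expanded, so the COMPACT* vm_event_item names do not have to exist in that configuration. A minimal standalone sketch of the same pattern (hypothetical FEATURE_STATS/count_stat names, not kernel code):

#include <stdio.h>

#ifdef FEATURE_STATS
enum stat_item { STAT_SCANNED, STAT_ISOLATED };
static inline void count_stat(enum stat_item item, long delta)
{
	printf("stat %d += %ld\n", item, delta);
}
#else
/* Macro form: the argument is never evaluated, so STAT_* need not exist. */
#define count_stat(item, delta) do { } while (0)
#endif

int main(void)
{
	/* Compiles both with and without -DFEATURE_STATS, mirroring the
	 * CONFIG_COMPACTION / CONFIG_CMA split in this file. */
	count_stat(STAT_SCANNED, 32);
	return 0;
}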
@@ -214,60 +230,6 @@ static bool suitable_migration_target(struct page *page)
        return false;
 }
 
-static void compact_capture_page(struct compact_control *cc)
-{
-       unsigned long flags;
-       int mtype, mtype_low, mtype_high;
-
-       if (!cc->page || *cc->page)
-               return;
-
-       /*
-        * For MIGRATE_MOVABLE allocations we capture a suitable page ASAP
-        * regardless of the migratetype of the freelist it is captured from.
-        * This is fine because the order for a high-order MIGRATE_MOVABLE
-        * allocation is typically at least a pageblock size and overall
-        * fragmentation is not impaired. Other allocation types must
-        * capture pages from their own migratelist because otherwise they
-        * could pollute other pageblocks like MIGRATE_MOVABLE with
-        * difficult-to-move pages, making fragmentation worse overall.
-        */
-       if (cc->migratetype == MIGRATE_MOVABLE) {
-               mtype_low = 0;
-               mtype_high = MIGRATE_PCPTYPES;
-       } else {
-               mtype_low = cc->migratetype;
-               mtype_high = cc->migratetype + 1;
-       }
-
-       /* Speculatively examine the free lists without zone lock */
-       for (mtype = mtype_low; mtype < mtype_high; mtype++) {
-               int order;
-               for (order = cc->order; order < MAX_ORDER; order++) {
-                       struct page *page;
-                       struct free_area *area;
-                       area = &(cc->zone->free_area[order]);
-                       if (list_empty(&area->free_list[mtype]))
-                               continue;
-
-                       /* Take the lock and attempt capture of the page */
-                       if (!compact_trylock_irqsave(&cc->zone->lock, &flags, cc))
-                               return;
-                       if (!list_empty(&area->free_list[mtype])) {
-                               page = list_entry(area->free_list[mtype].next,
-                                                       struct page, lru);
-                               if (capture_free_page(page, cc->order, mtype)) {
-                                       spin_unlock_irqrestore(&cc->zone->lock,
-                                                                       flags);
-                                       *cc->page = page;
-                                       return;
-                               }
-                       }
-                       spin_unlock_irqrestore(&cc->zone->lock, flags);
-               }
-       }
-}
-
 /*
  * Isolate free pages onto a private freelist. Caller must hold zone->lock.
  * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
@@ -356,6 +318,9 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
        if (blockpfn == end_pfn)
                update_pageblock_skip(cc, valid_page, total_isolated, false);
 
+       count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
+       if (total_isolated)
+               count_compact_events(COMPACTISOLATED, total_isolated);
        return total_isolated;
 }
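COMPACTFREE_SCANNED and COMPACTISOLATED here (and COMPACTMIGRATE_SCANNED further down) replace the per-loop COMPACTBLOCKS/COMPACTPAGES accounting removed later in this patch, so the exported counters now report pages scanned versus pages isolated rather than blocks compacted. A small userspace sketch of turning that pair into a scan-efficiency figure; the /proc/vmstat field spellings are assumed, check them against the running kernel:

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/vmstat", "r");
	char name[64];
	unsigned long long val, scanned = 0, isolated = 0;

	if (!f)
		return 1;
	while (fscanf(f, "%63s %llu", name, &val) == 2) {
		/* Assumed spellings of the counters added by this patch. */
		if (!strcmp(name, "compact_free_scanned") ||
		    !strcmp(name, "compact_migrate_scanned"))
			scanned += val;
		else if (!strcmp(name, "compact_isolated"))
			isolated = val;
	}
	fclose(f);
	if (scanned)
		printf("compaction isolation efficiency: %.1f%%\n",
		       100.0 * isolated / scanned);
	return 0;
}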
 
@@ -565,9 +530,24 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                        goto next_pageblock;
                }
 
-               /* Check may be lockless but that's ok as we recheck later */
-               if (!PageLRU(page))
+               /*
+                * Check may be lockless but that's ok as we recheck later.
+                * It's possible to migrate LRU pages and balloon pages;
+                * skip any other type of page.
+                */
+               if (!PageLRU(page)) {
+                       if (unlikely(balloon_page_movable(page))) {
+                               if (locked && balloon_page_isolate(page)) {
+                                       /* Successfully isolated */
+                                       cc->finished_update_migrate = true;
+                                       list_add(&page->lru, migratelist);
+                                       cc->nr_migratepages++;
+                                       nr_isolated++;
+                                       goto check_compact_cluster;
+                               }
+                       }
                        continue;
+               }
 
                /*
                 * PageLRU is set. lru_lock normally excludes isolation
@@ -621,6 +601,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                cc->nr_migratepages++;
                nr_isolated++;
 
+check_compact_cluster:
                /* Avoid isolating too much */
                if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
                        ++low_pfn;
@@ -646,6 +627,10 @@ next_pageblock:
 
        trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
 
+       count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
+       if (nr_isolated)
+               count_compact_events(COMPACTISOLATED, nr_isolated);
+
        return low_pfn;
 }
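The branch above means the private migratelist can now hold balloon pages, which never sit on an LRU; they are taken only when the scanner's locked state shows its lock is already held, and they rejoin the normal flow at the new check_compact_cluster label. A simplified standalone sketch of the per-page decision (hypothetical names, not the kernel API), with the LRU path collapsed to a single outcome:

#include <stdbool.h>

enum scan_result { SKIP_PAGE, ISOLATED_PAGE };

static enum scan_result classify_for_isolation(bool page_on_lru,
					       bool page_is_balloon,
					       bool lock_held)
{
	if (!page_on_lru) {
		/* Balloon pages are movable but not on any LRU; isolate
		 * them only when the lock is already held, as the patch
		 * does, otherwise skip the page entirely. */
		if (page_is_balloon && lock_held)
			return ISOLATED_PAGE;
		return SKIP_PAGE;
	}
	/* Ordinary LRU page: proceed to the usual isolation checks. */
	return ISOLATED_PAGE;
}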
 
@@ -831,6 +816,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 static int compact_finished(struct zone *zone,
                            struct compact_control *cc)
 {
+       unsigned int order;
        unsigned long watermark;
 
        if (fatal_signal_pending(current))
@@ -865,22 +851,16 @@ static int compact_finished(struct zone *zone,
                return COMPACT_CONTINUE;
 
        /* Direct compactor: Is a suitable page free? */
-       if (cc->page) {
-               /* Was a suitable page captured? */
-               if (*cc->page)
+       for (order = cc->order; order < MAX_ORDER; order++) {
+               struct free_area *area = &zone->free_area[order];
+
+               /* Job done if page is free of the right migratetype */
+               if (!list_empty(&area->free_list[cc->migratetype]))
+                       return COMPACT_PARTIAL;
+
+               /* Job done if allocation would set block type */
+               if (cc->order >= pageblock_order && area->nr_free)
                        return COMPACT_PARTIAL;
-       } else {
-               unsigned int order;
-               for (order = cc->order; order < MAX_ORDER; order++) {
-                       struct free_area *area = &zone->free_area[cc->order];
-                       /* Job done if page is free of the right migratetype */
-                       if (!list_empty(&area->free_list[cc->migratetype]))
-                               return COMPACT_PARTIAL;
-
-                       /* Job done if allocation would set block type */
-                       if (cc->order >= pageblock_order && area->nr_free)
-                               return COMPACT_PARTIAL;
-               }
        }
 
        return COMPACT_CONTINUE;
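Besides dropping the page-capture path, the rewritten check fixes an indexing slip visible in the removed lines: the old loop fetched zone->free_area[cc->order] on every pass even though the loop variable is order, so it only ever examined the requested order's free_area. A standalone sketch of the corrected walk (the sketch_* names are hypothetical, not kernel types):

#include <stdbool.h>
#include <stddef.h>

#define SKETCH_MAX_ORDER 11
#define SKETCH_NR_MIGRATETYPES 3

struct sketch_free_area {
	size_t nr_free_per_type[SKETCH_NR_MIGRATETYPES];
	size_t nr_free;
};

/* Walk every order from the requested one upward; compaction is "done"
 * as soon as a free page of the right migratetype exists, or the request
 * spans at least a pageblock and any sufficiently large free page exists
 * (allocating it would set the block's migratetype anyway). */
static bool suitable_page_already_free(const struct sketch_free_area *area,
				       unsigned int req_order, int migratetype,
				       unsigned int pageblock_order)
{
	unsigned int order;

	for (order = req_order; order < SKETCH_MAX_ORDER; order++) {
		/* Index with the loop variable, not req_order. */
		if (area[order].nr_free_per_type[migratetype])
			return true;
		if (req_order >= pageblock_order && area[order].nr_free)
			return true;
	}
	return false;
}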
@@ -986,7 +966,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                switch (isolate_migratepages(zone, cc)) {
                case ISOLATE_ABORT:
                        ret = COMPACT_PARTIAL;
-                       putback_lru_pages(&cc->migratepages);
+                       putback_movable_pages(&cc->migratepages);
                        cc->nr_migratepages = 0;
                        goto out;
                case ISOLATE_NONE:
@@ -998,29 +978,23 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                nr_migrate = cc->nr_migratepages;
                err = migrate_pages(&cc->migratepages, compaction_alloc,
                                (unsigned long)cc, false,
-                               cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
+                               cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC,
+                               MR_COMPACTION);
                update_nr_listpages(cc);
                nr_remaining = cc->nr_migratepages;
 
-               count_vm_event(COMPACTBLOCKS);
-               count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
-               if (nr_remaining)
-                       count_vm_events(COMPACTPAGEFAILED, nr_remaining);
                trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
                                                nr_remaining);
 
-               /* Release LRU pages not migrated */
+               /* Release isolated pages not migrated */
                if (err) {
-                       putback_lru_pages(&cc->migratepages);
+                       putback_movable_pages(&cc->migratepages);
                        cc->nr_migratepages = 0;
                        if (err == -ENOMEM) {
                                ret = COMPACT_PARTIAL;
                                goto out;
                        }
                }
-
-               /* Capture a page now if it is a suitable size */
-               compact_capture_page(cc);
        }
 
 out:
@@ -1033,8 +1007,7 @@ out:
 
 static unsigned long compact_zone_order(struct zone *zone,
                                 int order, gfp_t gfp_mask,
-                                bool sync, bool *contended,
-                                struct page **page)
+                                bool sync, bool *contended)
 {
        unsigned long ret;
        struct compact_control cc = {
@@ -1044,7 +1017,6 @@ static unsigned long compact_zone_order(struct zone *zone,
                .migratetype = allocflags_to_migratetype(gfp_mask),
                .zone = zone,
                .sync = sync,
-               .page = page,
        };
        INIT_LIST_HEAD(&cc.freepages);
        INIT_LIST_HEAD(&cc.migratepages);
@@ -1074,7 +1046,7 @@ int sysctl_extfrag_threshold = 500;
  */
 unsigned long try_to_compact_pages(struct zonelist *zonelist,
                        int order, gfp_t gfp_mask, nodemask_t *nodemask,
-                       bool sync, bool *contended, struct page **page)
+                       bool sync, bool *contended)
 {
        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
        int may_enter_fs = gfp_mask & __GFP_FS;
@@ -1088,7 +1060,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
        if (!order || !may_enter_fs || !may_perform_io)
                return rc;
 
-       count_vm_event(COMPACTSTALL);
+       count_compact_event(COMPACTSTALL);
 
 #ifdef CONFIG_CMA
        if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
@@ -1100,7 +1072,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
                int status;
 
                status = compact_zone_order(zone, order, gfp_mask, sync,
-                                               contended, page);
+                                               contended);
                rc = max(status, rc);
 
                /* If a normal allocation would succeed, stop compacting */
@@ -1156,7 +1128,6 @@ int compact_pgdat(pg_data_t *pgdat, int order)
        struct compact_control cc = {
                .order = order,
                .sync = false,
-               .page = NULL,
        };
 
        return __compact_pgdat(pgdat, &cc);
@@ -1167,14 +1138,13 @@ static int compact_node(int nid)
        struct compact_control cc = {
                .order = -1,
                .sync = true,
-               .page = NULL,
        };
 
        return __compact_pgdat(NODE_DATA(nid), &cc);
 }
 
 /* Compact all nodes in the system */
-static int compact_nodes(void)
+static void compact_nodes(void)
 {
        int nid;
 
@@ -1183,8 +1153,6 @@ static int compact_nodes(void)
 
        for_each_online_node(nid)
                compact_node(nid);
-
-       return COMPACT_COMPLETE;
 }
 
 /* The written value is actually unused, all memory is compacted */
@@ -1195,7 +1163,7 @@ int sysctl_compaction_handler(struct ctl_table *table, int write,
                        void __user *buffer, size_t *length, loff_t *ppos)
 {
        if (write)
-               return compact_nodes();
+               compact_nodes();
 
        return 0;
 }