Merge tag 'v3.10.68' into update
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / mm / compaction.c
index 05ccb4cc0bdb984dc2613461703587ec199804cd..4d4e992b717a11edbeb7a26299333c4dd8d91776 100644 (file)
 #include <linux/page-isolation.h>
 #include "internal.h"
 
+#ifdef CONFIG_HAS_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
+
 #ifdef CONFIG_COMPACTION
 static inline void count_compact_event(enum vm_event_item item)
 {
@@ -134,6 +138,10 @@ static void update_pageblock_skip(struct compact_control *cc,
                        bool migrate_scanner)
 {
        struct zone *zone = cc->zone;
+
+       if (cc->ignore_skip_hint)
+               return;
+
        if (!page)
                return;
 
@@ -222,6 +230,9 @@ static bool suitable_migration_target(struct page *page)
        if (is_migrate_isolate(migratetype))
                return false;
 
+       if (is_migrate_mtkpasr(migratetype))
+               return false;
+
        /* If the page is a large free page, then allow migration */
        if (PageBuddy(page) && page_order(page) >= pageblock_order)
                return true;
@@ -248,7 +259,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 {
        int nr_scanned = 0, total_isolated = 0;
        struct page *cursor, *valid_page = NULL;
-       unsigned long nr_strict_required = end_pfn - blockpfn;
        unsigned long flags;
        bool locked = false;
 
@@ -261,11 +271,12 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 
                nr_scanned++;
                if (!pfn_valid_within(blockpfn))
-                       continue;
+                       goto isolate_fail;
+
                if (!valid_page)
                        valid_page = page;
                if (!PageBuddy(page))
-                       continue;
+                       goto isolate_fail;
 
                /*
                 * The zone lock must be held to isolate freepages.
@@ -286,12 +297,10 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 
                /* Recheck this is a buddy page under lock */
                if (!PageBuddy(page))
-                       continue;
+                       goto isolate_fail;
 
                /* Found a free page, break it into order-0 pages */
                isolated = split_free_page(page);
-               if (!isolated && strict)
-                       break;
                total_isolated += isolated;
                for (i = 0; i < isolated; i++) {
                        list_add(&page->lru, freelist);
@@ -302,7 +311,15 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
                if (isolated) {
                        blockpfn += isolated - 1;
                        cursor += isolated - 1;
+                       continue;
                }
+
+isolate_fail:
+               if (strict)
+                       break;
+               else
+                       continue;
+
        }
 
        trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
@@ -312,7 +329,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
         * pages requested were isolated. If there were any failures, 0 is
         * returned and CMA will fail.
         */
-       if (strict && nr_strict_required > total_isolated)
+       if (strict && blockpfn < end_pfn)
                total_isolated = 0;
 
        if (locked)
@@ -647,17 +664,21 @@ static void isolate_freepages(struct zone *zone,
                                struct compact_control *cc)
 {
        struct page *page;
-       unsigned long high_pfn, low_pfn, pfn, z_end_pfn, end_pfn;
+       unsigned long high_pfn, low_pfn, pfn, z_end_pfn;
        int nr_freepages = cc->nr_freepages;
        struct list_head *freelist = &cc->freepages;
 
        /*
         * Initialise the free scanner. The starting point is where we last
-        * scanned from (or the end of the zone if starting). The low point
-        * is the end of the pageblock the migration scanner is using.
+        * successfully isolated from, zone-cached value, or the end of the
+        * zone when isolating for the first time. We need this aligned to
+        * the pageblock boundary, because we do pfn -= pageblock_nr_pages
+        * in the for loop.
+        * The low boundary is the end of the pageblock the migration scanner
+        * is using.
         */
-       pfn = cc->free_pfn;
-       low_pfn = cc->migrate_pfn + pageblock_nr_pages;
+       pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
+       low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
 
        /*
         * Take care that if the migration scanner is at the end of the zone
@@ -673,9 +694,10 @@ static void isolate_freepages(struct zone *zone,
         * pages on cc->migratepages. We stop searching if the migrate
         * and free page scanners meet or enough free pages are isolated.
         */
-       for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
+       for (; pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
                                        pfn -= pageblock_nr_pages) {
                unsigned long isolated;
+               unsigned long end_pfn;
 
                if (!pfn_valid(pfn))
                        continue;
@@ -703,13 +725,10 @@ static void isolate_freepages(struct zone *zone,
                isolated = 0;
 
                /*
-                * As pfn may not start aligned, pfn+pageblock_nr_page
-                * may cross a MAX_ORDER_NR_PAGES boundary and miss
-                * a pfn_valid check. Ensure isolate_freepages_block()
-                * only scans within a pageblock
+                * Take care when isolating in last pageblock of a zone which
+                * ends in the middle of a pageblock.
                 */
-               end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
-               end_pfn = min(end_pfn, z_end_pfn);
+               end_pfn = min(pfn + pageblock_nr_pages, z_end_pfn);
                isolated = isolate_freepages_block(cc, pfn, end_pfn,
                                                   freelist, false);
                nr_freepages += isolated;
@@ -728,7 +747,14 @@ static void isolate_freepages(struct zone *zone,
        /* split_free_page does not map the pages */
        map_pages(freelist);
 
-       cc->free_pfn = high_pfn;
+       /*
+        * If we crossed the migrate scanner, we want to keep it that way
+        * so that compact_finished() may detect this
+        */
+       if (pfn < low_pfn)
+               cc->free_pfn = max(pfn, zone->zone_start_pfn);
+       else
+               cc->free_pfn = high_pfn;
        cc->nr_freepages = nr_freepages;
 }
 
@@ -936,6 +962,14 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                ;
        }
 
+       /*
+        * Clear pageblock skip if there were failures recently and compaction
+        * is about to be retried after being deferred. kswapd does not do
+        * this reset as it'll reset the cached information when going to sleep.
+        */
+       if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
+               __reset_isolation_suitable(zone);
+
        /*
         * Setup to move all movable pages to the end of the zone. Used cached
         * information on where the scanners should start but check that it
@@ -952,14 +986,6 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                zone->compact_cached_migrate_pfn = cc->migrate_pfn;
        }
 
-       /*
-        * Clear pageblock skip if there were failures recently and compaction
-        * is about to be retried after being deferred. kswapd does not do
-        * this reset as it'll reset the cached information when going to sleep.
-        */
-       if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
-               __reset_isolation_suitable(zone);
-
        migrate_prep_local();
 
        while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
@@ -993,7 +1019,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                if (err) {
                        putback_movable_pages(&cc->migratepages);
                        cc->nr_migratepages = 0;
-                       if (err == -ENOMEM) {
+                       /*
+                        * migrate_pages() may return -ENOMEM when scanners meet
+                        * and we want compact_finished() to detect it
+                        */
+                       if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
                                ret = COMPACT_PARTIAL;
                                goto out;
                        }
@@ -1207,3 +1237,58 @@ void compaction_unregister_node(struct node *node)
 #endif /* CONFIG_SYSFS && CONFIG_NUMA */
 
 #endif /* CONFIG_COMPACTION */
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+extern void drop_pagecache(void);
+//extern void kick_lmk_from_compaction(gfp_t);
+/*
+ * Early-suspend callback: when the device enters early suspend, try to
+ * compact node 0's ZONE_NORMAL until an order (THREAD_SIZE_ORDER + 1)
+ * allocation would succeed under the low watermark, so such allocations
+ * are less likely to stall after suspend.  Bounded to 3 compaction passes.
+ */
+static void kick_compaction_early_suspend(struct early_suspend *h)
+{
+       struct zone *z = &NODE_DATA(0)->node_zones[ZONE_NORMAL];
+       int retry = 3;
+       int safe_order = THREAD_SIZE_ORDER + 1;
+       bool contended;
+       gfp_t gfp_mask = GFP_KERNEL;
+
+       /* Bail out if GFP_KERNEL is currently restricted by gfp_allowed_mask
+        * (e.g. while suspend has narrowed the allowed GFP flags). */
+       if (gfp_mask != (gfp_mask & gfp_allowed_mask)) {
+               printk(KERN_WARNING "XXXXXX GFP is restricted! XXXXXX\n");
+               return;
+       }
+
+       /* We try retry times at most. */
+       while (retry > 0) {
+               /* If it is safe under low watermark, then break. */
+               if (zone_watermark_ok(z, safe_order, low_wmark_pages(z), 0, 0))
+                       break;
+               /* Return value deliberately ignored: the watermark re-check
+                * above decides whether another pass is needed. */
+               compact_zone_order(z, safe_order, gfp_mask, true, &contended);
+               --retry;
+       }
+}
+
+/* Late-resume counterpart: intentionally empty — compaction is only
+ * kicked on suspend, nothing needs undoing on resume. */
+static void kick_compaction_late_resume(struct early_suspend *h)
+{
+       /* Do nothing */
+}
+
+/*
+ * Early-suspend hook descriptor.  Level is DISABLE_FB + 1, presumably so
+ * compaction runs after the framebuffer-disable callbacks — TODO confirm
+ * intended ordering against other registered handlers.
+ */
+static struct early_suspend kick_compaction_early_suspend_desc = {
+       .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1,
+       .suspend = kick_compaction_early_suspend,
+       .resume = kick_compaction_late_resume,
+};
+
+/*
+ * Register the compaction early-suspend callback at module init.
+ * Always succeeds (returns 0).
+ */
+static int __init compaction_init(void)
+{
+       printk(KERN_INFO "@@@@@@ [%s] Register early suspend callback @@@@@@\n",
+              __func__);
+       register_early_suspend(&kick_compaction_early_suspend_desc);
+       return 0;
+}
+/* Unregister the compaction early-suspend callback at module exit. */
+static void __exit compaction_exit(void)
+{
+       printk(KERN_INFO "@@@@@@ [%s] Unregister early suspend callback @@@@@@\n",
+              __func__);
+       unregister_early_suspend(&kick_compaction_early_suspend_desc);
+}
+
+module_init(compaction_init);
+module_exit(compaction_exit);
+#endif