Merge tag 'v3.10.87' into update
[GitHub/mt8127/android_kernel_alcatel_ttab.git] mm/vmscan.c
index 233f0011f7680d99a2726db7a77fe19fcdd655b4..bf4921af2170ccce73fd8404959af657598f04eb 100644
@@ -43,6 +43,7 @@
 #include <linux/sysctl.h>
 #include <linux/oom.h>
 #include <linux/prefetch.h>
+#include <linux/debugfs.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -155,6 +156,39 @@ static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
        return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
 }
 
+static struct dentry *debug_file;
+
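+/*
+ * debugfs interface: dump the object count of every registered shrinker.
+ * Each shrinker is called with nr_to_scan == 0, which by convention only
+ * queries the count and does not reclaim anything.
+ */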
+static int debug_shrinker_show(struct seq_file *s, void *unused)
+{
+       struct shrinker *shrinker;
+       struct shrink_control sc;
+
+       sc.gfp_mask = -1;
+       sc.nr_to_scan = 0;
+
+       down_read(&shrinker_rwsem);
+       list_for_each_entry(shrinker, &shrinker_list, list) {
+               int num_objs;
+
+               num_objs = shrinker->shrink(shrinker, &sc);
+               seq_printf(s, "%pf %d\n", shrinker->shrink, num_objs);
+       }
+       up_read(&shrinker_rwsem);
+       return 0;
+}
+
+static int debug_shrinker_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, debug_shrinker_show, inode->i_private);
+}
+
+static const struct file_operations debug_shrinker_fops = {
+       .open = debug_shrinker_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
 /*
  * Add a shrinker callback to be called from the vm
  */
@@ -167,6 +201,15 @@ void register_shrinker(struct shrinker *shrinker)
 }
 EXPORT_SYMBOL(register_shrinker);
 
+static int __init add_shrinker_debug(void)
+{
+       debug_file = debugfs_create_file("shrinker", 0644, NULL, NULL,
+                                        &debug_shrinker_fops);
+       return 0;
+}
+
+late_initcall(add_shrinker_debug);
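+
+/*
+ * Usage sketch (assuming debugfs is mounted at the usual /sys/kernel/debug):
+ *
+ *   # cat /sys/kernel/debug/shrinker
+ *   <shrink callback name> <object count>
+ *   ...
+ *
+ * One line per registered shrinker, formatted as "%pf %d" above.
+ */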
+
 /*
  * Remove one
  */
@@ -1186,7 +1229,7 @@ static int too_many_isolated(struct zone *zone, int file,
 {
        unsigned long inactive, isolated;
 
-       if (current_is_kswapd())
+       if (current_is_kswapd() || sc->hibernation_mode)
                return 0;
 
        if (!global_reclaim(sc))
@@ -1636,6 +1679,42 @@ enum scan_balance {
        SCAN_FILE,
 };
 
+
+#ifdef CONFIG_ZRAM
+static int vmscan_swap_file_ratio = 1;
+module_param_named(swap_file_ratio, vmscan_swap_file_ratio, int, S_IRUGO | S_IWUSR);
+
+#if defined(CONFIG_ZRAM) && defined(CONFIG_MTK_LCA_RAM_OPTIMIZE)
+
+/* vmscan debug */
+static int vmscan_swap_sum = 200;
+module_param_named(swap_sum, vmscan_swap_sum, int, S_IRUGO | S_IWUSR);
+
+
+static int vmscan_scan_file_sum = 0;
+static int vmscan_scan_anon_sum = 0;
+static int vmscan_recent_scanned_anon = 0;
+static int vmscan_recent_scanned_file = 0;
+static int vmscan_recent_rotated_anon = 0;
+static int vmscan_recent_rotated_file = 0;
+module_param_named(scan_file_sum, vmscan_scan_file_sum, int, S_IRUGO);
+module_param_named(scan_anon_sum, vmscan_scan_anon_sum, int, S_IRUGO);
+module_param_named(recent_scanned_anon, vmscan_recent_scanned_anon, int, S_IRUGO);
+module_param_named(recent_scanned_file, vmscan_recent_scanned_file, int, S_IRUGO);
+module_param_named(recent_rotated_anon, vmscan_recent_rotated_anon, int, S_IRUGO);
+module_param_named(recent_rotated_file, vmscan_recent_rotated_file, int, S_IRUGO);
+#endif /* CONFIG_ZRAM && CONFIG_MTK_LCA_RAM_OPTIMIZE */
+
+
+#if defined(CONFIG_ZRAM) && defined(CONFIG_MTK_LCA_RAM_OPTIMIZE)
+static unsigned long t;
+static unsigned long history[2] = {0};
+extern int lowmem_minfree[9];
+#endif
+
+#endif /* CONFIG_ZRAM */
+
 /*
  * Determine how aggressively the anon and file LRU lists should be
  * scanned.  The relative value of each set of LRU lists is determined
@@ -1658,6 +1737,11 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
        bool force_scan = false;
        unsigned long ap, fp;
        enum lru_list lru;
+#if defined(CONFIG_ZRAM) && defined(CONFIG_MTK_LCA_RAM_OPTIMIZE)
+       int cpu;
+       unsigned long SwapinCount = 0, SwapoutCount = 0, cached;
+       bool bThrashing = false;
+#endif
 
        /*
         * If the zone or memcg is small, nr[l] can be 0.  This
@@ -1739,6 +1823,70 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
        anon_prio = vmscan_swappiness(sc);
        file_prio = 200 - anon_prio;
 
+       /*
+        * With swappiness at 100, anonymous and file have the same priority.
+        * This scanning priority is essentially the inverse of IO cost.
+        */
+#if defined(CONFIG_ZRAM) && defined(CONFIG_MTK_LCA_RAM_OPTIMIZE)
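+       /*
+        * MTK heuristic: once per second, sample the global PSWPIN/PSWPOUT
+        * counters.  If more than 3000 pages/s were swapped in or out, treat
+        * the system as thrashing.  While not thrashing, weight anon vs.
+        * file priority by the relative size of each LRU; while thrashing,
+        * fall back to the plain swappiness split as long as enough page
+        * cache (> lowmem_minfree[2]) remains.
+        */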
+       if (vmscan_swap_file_ratio) {
+
+               if (t == 0)
+                       t = jiffies;
+
+               if (time_after(jiffies, t + 1 * HZ)) {
+
+                       for_each_online_cpu(cpu) {
+                               struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
+                               SwapinCount += this->event[PSWPIN];
+                               SwapoutCount += this->event[PSWPOUT];
+                       }
+
+                       if (((SwapinCount - history[0] + SwapoutCount - history[1]) * HZ / (jiffies - t)) > 3000)
+                               bThrashing = true;
+                       else
+                               bThrashing = false;
+
+                       history[0] = SwapinCount;
+                       history[1] = SwapoutCount;
+
+                       t = jiffies;
+               }
+
+               if (!bThrashing) {
+                       anon_prio = (vmscan_swappiness(sc) * anon) / (anon + file + 1);
+                       file_prio = (vmscan_swap_sum - vmscan_swappiness(sc)) * file / (anon + file + 1);
+               } else {
+                       cached = global_page_state(NR_FILE_PAGES) - global_page_state(NR_SHMEM) - total_swapcache_pages();
+                       if (cached > lowmem_minfree[2]) {
+                               anon_prio = vmscan_swappiness(sc);
+                               file_prio = vmscan_swap_sum - vmscan_swappiness(sc);
+                       } else {
+                               anon_prio = (vmscan_swappiness(sc) * anon) / (anon + file + 1);
+                               file_prio = (vmscan_swap_sum - vmscan_swappiness(sc)) * file / (anon + file + 1);
+                       }
+               }
+
+       } else {
+               anon_prio = vmscan_swappiness(sc);
+               file_prio = vmscan_swap_sum - vmscan_swappiness(sc);
+       }
+#elif defined(CONFIG_ZRAM)
+       if (vmscan_swap_file_ratio) {
+               anon_prio = anon_prio * anon / (anon + file + 1);
+               file_prio = file_prio * file / (anon + file + 1);
+       }
+#endif /* CONFIG_ZRAM */
+
+
+
        /*
         * OK, so we have swap space and a fair amount of page cache
         * pages.  We use the recently rotated / recently scanned
@@ -2076,7 +2224,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
                        if (zone->all_unreclaimable &&
                                        sc->priority != DEF_PRIORITY)
                                continue;       /* Let kswapd poll it */
-                       if (IS_ENABLED(CONFIG_COMPACTION)) {
+                       if (IS_ENABLED(CONFIG_COMPACTION) && !sc->hibernation_mode) {
                                /*
                                 * If we already have plenty of memory free for
                                 * compaction in this zone, don't free any more.
@@ -2178,6 +2326,11 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
        unsigned long writeback_threshold;
        bool aborted_reclaim;
 
+#ifdef CONFIG_FREEZER
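+       /*
+        * Don't reclaim while tasks are being frozen for suspend;
+        * hibernation's own reclaim (sc->hibernation_mode) is still allowed.
+        */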
+       if (unlikely(pm_freezing && !sc->hibernation_mode))
+               return 0;
+#endif
+
        delayacct_freepages_start();
 
        if (global_reclaim(sc))
@@ -3104,6 +3257,11 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
        if (!populated_zone(zone))
                return;
 
+#ifdef CONFIG_FREEZER
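+       /* Don't wake kswapd while tasks are being frozen for suspend/hibernation */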
+       if (pm_freezing)
+               return;
+#endif
+
        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                return;
        pgdat = zone->zone_pgdat;
@@ -3129,11 +3287,11 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
  * LRU order by reclaiming preferentially
  * inactive > active > active referenced > active mapped
  */
-unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
+unsigned long shrink_memory_mask(unsigned long nr_to_reclaim, gfp_t mask)
 {
        struct reclaim_state reclaim_state;
        struct scan_control sc = {
-               .gfp_mask = GFP_HIGHUSER_MOVABLE,
+               .gfp_mask = mask,
                .may_swap = 1,
                .may_unmap = 1,
                .may_writepage = 1,
@@ -3162,6 +3320,19 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
 
        return nr_reclaimed;
 }
+EXPORT_SYMBOL_GPL(shrink_memory_mask);
+
+#ifdef CONFIG_MTKPASR
+extern void shrink_mtkpasr_all(void);
+#else
+#define shrink_mtkpasr_all()   do {} while (0)
+#endif
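+/*
+ * shrink_all_memory() keeps its historical GFP_HIGHUSER_MOVABLE reclaim for
+ * hibernation, but first lets MTK PASR release whatever it can.
+ */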
+unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
+{
+       shrink_mtkpasr_all();
+       return shrink_memory_mask(nr_to_reclaim, GFP_HIGHUSER_MOVABLE);
+}
+EXPORT_SYMBOL_GPL(shrink_all_memory);
 #endif /* CONFIG_HIBERNATION */
 
 /* It's optimal to keep kswapds on the same CPUs as their memory, but
@@ -3582,3 +3753,283 @@ void scan_unevictable_unregister_node(struct node *node)
        device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages);
 }
 #endif
+
+#ifdef CONFIG_MTKPASR
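+/*
+ * Ask every registered shrinker to drop as much as it can: query the object
+ * count (nr_to_scan == 0), ask it to scan that many objects, and repeat up
+ * to two passes per shrinker.
+ */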
+void try_to_shrink_slab(void)
+{
+       struct shrinker *shrinker;
+       struct shrink_control shrink = {
+               .gfp_mask = GFP_KERNEL|__GFP_HIGHMEM,
+       };
+
+       if (!down_read_trylock(&shrinker_rwsem))
+               return;
+
+       list_for_each_entry(shrinker, &shrinker_list, list) {
+               int num_objs;
+               int shrink_ret = 0;
+               int retry = 2;
+
+               num_objs = do_shrinker_shrink(shrinker, &shrink, 0);
+               if (num_objs <= 0)
+                       continue;
+
+               do {
+                       /* To shrink */
+                       shrink_ret = do_shrinker_shrink(shrinker, &shrink, num_objs);
+                       if (shrink_ret == -1)
+                               break;
+                       /* Check empty */
+                       num_objs = do_shrinker_shrink(shrinker, &shrink, 0);
+                       if (num_objs <= 0)
+                               break;
+               } while (--retry);
+       }
+
+       up_read(&shrinker_rwsem);
+}
+
+extern void free_hot_cold_page(struct page *page, int cold);
+/* Isolate pages for PASR */
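+/*
+ * Uses a trylock on the zone lru_lock so the PASR path never blocks; on
+ * success the page has been taken off its LRU and the caller holds the
+ * isolation reference.  Returns -EAGAIN if the lock is contended and
+ * -EACCES if the page cannot be isolated.
+ */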
+#ifdef CONFIG_MTKPASR_ALLEXTCOMP
+int mtkpasr_isolate_page(struct page *page, int check_swap)
+#else
+int mtkpasr_isolate_page(struct page *page)
+#endif
+{
+       struct zone *zone = page_zone(page);
+       struct lruvec *lruvec;
+       unsigned long flags;
+       isolate_mode_t mode = ISOLATE_ASYNC_MIGRATE;
+
+       /* Lock this zone - USE trylock version! */
+       if (!spin_trylock_irqsave(&zone->lru_lock, flags)) {
+               printk(KERN_ALERT "[%s][%d] Failed to lock this zone!\n", __func__, __LINE__);
+               return -EAGAIN;
+       }
+
+#ifdef CONFIG_MTKPASR_ALLEXTCOMP
+       /* Check whether we should handle SwapBacked, SwapCache pages */
+       if (check_swap) {
+               if (PageSwapBacked(page) || PageSwapCache(page)) {
+                       spin_unlock_irqrestore(&zone->lru_lock, flags);
+                       return -EACCES;
+               }
+       }
+#endif
+
+       /* Try to isolate this page */
+       if (__isolate_lru_page(page, mode) != 0) {
+               spin_unlock_irqrestore(&zone->lru_lock, flags);
+               return -EACCES;
+       }
+       
+       /* Successfully isolated */
+       lruvec = mem_cgroup_page_lruvec(page, zone);
+       del_page_from_lru_list(page, lruvec, page_lru(page));
+       
+       /* Unlock this zone */
+       spin_unlock_irqrestore(&zone->lru_lock, flags);
+
+       return 0;
+}
+
+/* Drop page (in File/Anon LRUs) (Imitate the behavior of shrink_page_list) */
+/* If returns error, caller needs to putback page by itself. */
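+/*
+ * Single-page variant of the shrink_page_list() flow: isolate the page,
+ * add anonymous pages to swap, unmap, write back dirty swap-backed pages,
+ * drop buffers, remove the page from its mapping and free it.  On failure
+ * an error is returned and, as noted above, putback is left to the caller.
+ */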
+int mtkpasr_drop_page(struct page *page)
+{
+       int ret;
+       unsigned long vm_flags = 0x0;
+       bool active = false;
+       struct address_space *mapping;
+       enum ttu_flags unmap_flags = TTU_UNMAP;
+
+       /* Suitable scan control */
+       struct scan_control sc = {
+               .gfp_mask = GFP_KERNEL,
+               .order = PAGE_ALLOC_COSTLY_ORDER + 1,
+       };
+
+       /* Try to isolate this page */
+#ifdef CONFIG_MTKPASR_ALLEXTCOMP
+       ret = mtkpasr_isolate_page(page, 0x1);
+#else
+       ret = mtkpasr_isolate_page(page);
+#endif
+       if (ret) {
+               return ret;
+       }
+       
+       /* Check whether it is evictable! */
+       if (unlikely(!page_evictable(page))) {
+               putback_lru_page(page);
+               return -EACCES;
+       }
+
+       /* If it is Active, reference and deactivate it */
+       if (PageActive(page)) {
+               active = TestClearPageActive(page);
+       }
+
+       /* If we fail to lock this page, ignore it */   
+       if (!trylock_page(page)) {
+               goto putback;
+       }
+       
+       /* If page is in writeback, we don't handle it here! */
+       if (PageWriteback(page)) {
+               goto unlock;
+       }
+       
+       /*
+        * Anonymous process memory has backing store?
+        * Try to allocate it some swap space here.
+        */
+       if (PageAnon(page) && !PageSwapCache(page)) {
+               /* Check whether we have enough free memory */
+               if (vm_swap_full()) {
+                       goto unlock;
+               }
+
+               /* Ok! It is safe to add this page to swap. */
+               if (!add_to_swap(page, NULL)){
+                       goto unlock;
+               }
+       }
+       
+       /* We don't handle dirty file cache here (Related devices may be suspended) */
+       if (page_is_file_cache(page)) {
+               /* How do we handle pages in VM_EXEC vmas? */
+               if ((vm_flags & VM_EXEC)) {
+                       goto unlock;
+               }
+               /* We don't handle dirty file pages! */
+               if (PageDirty(page)) {
+#ifdef CONFIG_MTKPASR_DEBUG
+                       printk(KERN_ALERT "[%s][%d] skip dirty file page\n", __func__, __LINE__);
+#endif
+                       goto unlock;
+               }
+       }
+               
+       /*
+        * The page is mapped into the page tables of one or more
+        * processes. Try to unmap it here.
+        */
+       mapping = page_mapping(page);
+       if (page_mapped(page) && mapping) {
+#if 0
+               /* Indicate unmap action for SwapBacked pages */
+               if (PageSwapBacked(page)) {
+                       unmap_flags |= TTU_IGNORE_ACCESS; 
+               }
+#endif
+               /* To unmap */
+               switch (try_to_unmap(page, unmap_flags)) {
+               case SWAP_SUCCESS:
+                       /* try to free the page below */
+                       break;
+               case SWAP_FAIL:
+               case SWAP_AGAIN:
+               case SWAP_MLOCK:
+                       goto restore_swap;
+               }
+       }
+       
+       /*
+        * Check whether the page is dirty.  Dirty file pages were filtered
+        * out above (IMPORTANT!), so any dirty page left here should be
+        * swap-backed: VM_BUG_ON(!PageSwapBacked(page)).
+        */
+       if (PageDirty(page)) {
+               /* Page is dirty, try to write it out here */
+               /* It's ok for zram swap! */
+               /* Should we need to apply GFP_IOFS? */
+               switch (pageout(page, mapping, &sc)) {
+               case PAGE_SUCCESS:
+                       if (PageWriteback(page)) {
+                               goto putback;
+                       }
+                       if (PageDirty(page)) {
+                               goto putback;
+                       }
+
+                       /*
+                        * A synchronous write - probably a ramdisk.  Go
+                        * ahead and try to reclaim the page.
+                        */
+                       if (!trylock_page(page)) {
+                               goto putback;
+                       }
+                       if (PageDirty(page) || PageWriteback(page)) {
+                               goto unlock;
+                       }
+                       mapping = page_mapping(page);
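+                       /* fall through to PAGE_CLEAN */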
+               case PAGE_CLEAN:
+                       /* try to free the page below */
+                       break;
+               default:
+#ifdef CONFIG_MTKPASR_DEBUG
+                       /* printk(KERN_ALERT "[%s][%d] pageout failed\n", __func__, __LINE__); */
+#endif
+                       goto restore_unmap;
+               }
+       }
+
+       /* Release buffer */
+       if (page_has_private(page)) {
+               if (!try_to_release_page(page, sc.gfp_mask)) {
+                       goto unlock;
+               }
+               if (!mapping && page_count(page) == 1) {
+                       unlock_page(page);
+                       if (put_page_testzero(page)) {
+                               goto freeit;
+                       } else {
+                               /* Race! TOCHECK */
+                               printk(KERN_ALERT "[%s][%d] RACE!!\n", __func__, __LINE__);
+                               goto notask;
+                       }
+               }
+       }
+       if (!mapping || !__remove_mapping(mapping, page)) {
+               goto unlock;
+       }
+               
+       __clear_page_locked(page);
+
+freeit:
+       free_hot_cold_page(page, 0);
+       return 0;
+
+restore_unmap:
+       /* Fall through to restore_swap */
+
+restore_swap:
+       if (PageSwapCache(page))
+               try_to_free_swap(page);
+
+unlock:
+       unlock_page(page);
+
+putback:
+       /* Activate it again if needed! */
+       if (active)
+               SetPageActive(page);
+
+       /*
+        * We don't put the page back to its LRU here, because the caller
+        * wants to do more work outside this function:
+        * putback_lru_page(page);
+        */
+
+       /* Failed to drop the page - let the caller migrate it instead */
+       return -EBUSY;
+
+notask:
+       return 0;
+}
+#endif