mm: change lmk/cma usage policy [1/1]
author Tao Zeng <tao.zeng@amlogic.com>
Wed, 16 Oct 2019 08:44:11 +0000 (16:44 +0800)
committer Jianxin Pan <jianxin.pan@amlogic.com>
Wed, 30 Oct 2019 05:50:23 +0000 (22:50 -0700)
PD#TV-10462

Problem:
Memory allocation may fail when playing a secure video source. This
is usually seen in the zram/wifi drivers.

Solution:
1, wake up kswapd earlier if the watermark check fails once free CMA
pages are excluded (see the sketch after this list);
2, use zone file-cache statistics to make lmk more active, which is
more accurate than using the global page state;
3, remove some restrictions on using CMA when allocating movable pages
for zram or when migrating pages out of the CMA pool;
4, try harder to allocate for atomic requests issued in softirq context
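
For point 1, here is a minimal user-space sketch (not the kernel code
itself; the helper name and sample numbers are made up) of the wake-up
policy implemented in should_wakeup_kswap() below: free CMA pages are
excluded from the free count when the request cannot use CMA, and the
high watermark is scaled by 1.5x for __GFP_HIGH requests so kswapd is
woken earlier.

#include <stdbool.h>
#include <stdio.h>

static bool should_wake_kswapd(unsigned long free_pages,
                               unsigned long free_cma,
                               unsigned long high_wmark,
                               bool can_use_cma, bool gfp_high)
{
        unsigned long high_wm = high_wmark;

        if (!can_use_cma)       /* CMA pages cannot serve this request */
                free_pages -= free_cma;
        if (gfp_high)           /* be more aggressive for __GFP_HIGH */
                high_wm = high_wm * 3 / 2;

        return free_pages <= high_wm;
}

int main(void)
{
        /* 40960 free pages, 20480 of them in CMA, high watermark 25600 */
        printf("wake kswapd: %d\n",
               should_wake_kswapd(40960, 20480, 25600, false, false));
        return 0;
}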

Verify:
T950L

Change-Id: Ibf03f3c11a32175e9983ee8a61a14ae4b2436f1e
Signed-off-by: Tao Zeng <tao.zeng@amlogic.com>
drivers/amlogic/memory_ext/aml_cma.c
drivers/staging/android/lowmemorykiller.c
mm/page_alloc.c

index 809cf131680bebcf7579effaa8630ee945836911..c75b823eb03a7ac3d0c35251e5e42c897c7eaf11 100644 (file)
@@ -117,13 +117,6 @@ bool can_use_cma(gfp_t gfp_flags)
        if (cma_forbidden_mask(gfp_flags))
                return false;
 
-       /*
-        * do not use cma pages when cma allocate is working. this is the
-        * weakest condition
-        */
-       if (cma_alloc_ref())
-               return false;
-
        if (task_nice(current) > 0)
                return false;
 
@@ -229,7 +222,7 @@ static unsigned long get_align_pfn_high(unsigned long pfn)
 static struct page *get_migrate_page(struct page *page, unsigned long private,
                                  int **resultp)
 {
-       gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_BDEV;
+       gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
        struct page *new = NULL;
 #ifdef CONFIG_AMLOGIC_PAGE_TRACE
        struct page_trace *old_trace, *new_trace;
index 694637b9852b1c62f4d3904b3cded660c6afce87..eea43bb7c5a9c88cbc1460741839e892d355a2b0 100644 (file)
@@ -293,16 +293,38 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
        short selected_oom_score_adj;
        int array_size = ARRAY_SIZE(lowmem_adj);
        int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
-       int other_file = global_node_page_state(NR_FILE_PAGES) -
-                               global_node_page_state(NR_SHMEM) -
-                               global_node_page_state(NR_UNEVICTABLE) -
-                               total_swapcache_pages();
 #ifdef CONFIG_AMLOGIC_CMA
+       int other_file;
+       struct zone *z = NULL;
+       pg_data_t *pgdat;
        int free_cma   = 0;
        int file_cma   = 0;
        int cma_forbid = 0;
-
-       if (cma_forbidden_mask(sc->gfp_mask) && !current_is_kswapd()) {
+       int zfile      = 0;
+       int globle_file = global_node_page_state(NR_FILE_PAGES) -
+                         global_node_page_state(NR_SHMEM) -
+                         global_node_page_state(NR_UNEVICTABLE) -
+                         total_swapcache_pages();
+
+       if (gfp_zone(sc->gfp_mask) == ZONE_NORMAL) {
+               /* use zone page state for better accuracy */
+               pgdat = NODE_DATA(sc->nid);
+               z     = &pgdat->node_zones[ZONE_NORMAL];
+               if (managed_zone(z)) {
+                       zfile = zone_page_state(z, NR_ZONE_INACTIVE_FILE) +
+                               zone_page_state(z, NR_ZONE_ACTIVE_FILE);
+                       other_file = zfile -
+                                    global_node_page_state(NR_SHMEM) -
+                                    zone_page_state(z, NR_ZONE_UNEVICTABLE) -
+                                    total_swapcache_pages();
+               } else {
+                       other_file = globle_file;
+               }
+       } else {
+               other_file = globle_file;
+       }
+       if (cma_forbidden_mask(sc->gfp_mask) &&
+           (!current_is_kswapd() || cma_alloc_ref())) {
                free_cma    = global_page_state(NR_FREE_CMA_PAGES);
                file_cma    = global_page_state(NR_INACTIVE_FILE_CMA) +
                              global_page_state(NR_ACTIVE_FILE_CMA);
@@ -310,6 +332,11 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
                other_file -= file_cma;
                cma_forbid  = 1;
        }
+#else
+       int other_file = global_node_page_state(NR_FILE_PAGES) -
+                               global_node_page_state(NR_SHMEM) -
+                               global_node_page_state(NR_UNEVICTABLE) -
+                               total_swapcache_pages();
 #endif /* CONFIG_AMLOGIC_CMA */
 
        if (lowmem_adj_size < array_size)
@@ -408,6 +435,12 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
                } else {
                        lowmem_deathpending_timeout = jiffies + HZ;
                }
+               if (z)
+                       pr_info("  zone:%s, file:%d, shmem:%ld, unevc:%ld, file_cma:%d\n",
+                               z->name, zfile,
+                               global_node_page_state(NR_SHMEM),
+                               zone_page_state(z, NR_ZONE_UNEVICTABLE),
+                               file_cma);
        #else
                lowmem_deathpending_timeout = jiffies + HZ;
        #endif /* CONFIG_AMLOGIC_CMA */
index 9711cb8ba13cab94f5f31b49b651e1dfcb67a542..fd40b3a5c1f6cb3b4c01aff548651dbc912f40c7 100644 (file)
@@ -3018,9 +3018,7 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
 
 #ifdef CONFIG_CMA
        /* If allocation can't use CMA areas don't use free CMA pages */
-#ifndef CONFIG_AMLOGIC_CMA /* always sub cma pages to avoid wm all CMA */
        if (!(alloc_flags & ALLOC_CMA))
-#endif
                free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
 #endif
 
@@ -3197,6 +3195,12 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
                        if (alloc_flags & ALLOC_NO_WATERMARKS)
                                goto try_this_zone;
 
+               #ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+                       /* alloc harder if under softirq */
+                       /* allocate harder when serving softirq */
+                               goto try_this_zone;
+               #endif
+
                        if (node_reclaim_mode == 0 ||
                            !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
                                continue;
@@ -4004,15 +4008,22 @@ static inline void should_wakeup_kswap(gfp_t gfp_mask, int order,
        unsigned long free_pages, free_cma = 0;
        struct zoneref *z = ac->preferred_zoneref;
        struct zone *zone;
+       unsigned long high_wm;
 
-       if (!(gfp_mask & __GFP_RECLAIM))        /* not allowed */
+       /*
+        * Skip only when the flags do not allow reclaim and the
+        * request is not atomic: atomic requests still pre-wake
+        * kswapd to avoid a burst of allocation failures in a
+        * very short time.
+        */
+       if (!(gfp_mask & __GFP_RECLAIM) && !(gfp_mask & __GFP_ATOMIC))
                return;
 
        for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
                                                                ac->nodemask) {
                free_pages = zone_page_state(zone, NR_FREE_PAGES);
        #ifdef CONFIG_AMLOGIC_CMA
-               if (can_use_cma(gfp_mask))
+               if (!can_use_cma(gfp_mask))
                        free_cma = zone_page_state(zone, NR_FREE_CMA_PAGES);
        #endif /* CONFIG_AMLOGIC_CMA */
                free_pages -= free_cma;
@@ -4021,7 +4032,10 @@ static inline void should_wakeup_kswap(gfp_t gfp_mask, int order,
                 * fast reclaim process and can avoid memory become too low
                 * some times
                 */
-               if (free_pages <= high_wmark_pages(zone))
+               high_wm = high_wmark_pages(zone);
+               if (gfp_mask & __GFP_HIGH) /* 1.5x if __GFP_HIGH */
+                       high_wm = ((high_wm * 3) / 2);
+               if (free_pages <= high_wm)
                        wakeup_kswapd(zone, order, ac->high_zoneidx);
        }
 }