if (cma_forbidden_mask(gfp_flags))
return false;
- /*
- * do not use cma pages when cma allocate is working. this is the
- * weakest condition
- */
- if (cma_alloc_ref())
- return false;
-
if (task_nice(current) > 0)
return false;
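The two guards above (plus the dropped cma_alloc_ref() test) appear to belong to the predicate the allocator hunk further down calls as can_use_cma(). A minimal sketch of its post-patch shape, assuming cma_forbidden_mask() takes the caller's GFP flags as implied above; the exact body is an assumption, not the verbatim driver code:

/* Sketch only: approximate shape of a can_use_cma()-style predicate
 * built from the guards shown above.
 */
static bool can_use_cma(gfp_t gfp_flags)
{
	if (cma_forbidden_mask(gfp_flags))	/* GFP mask forbids CMA fallback */
		return false;
	if (task_nice(current) > 0)		/* low-priority task: keep CMA free */
		return false;
	return true;
}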
static struct page *get_migrate_page(struct page *page, unsigned long private,
int **resultp)
{
- gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_BDEV;
+ gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
struct page *new = NULL;
#ifdef CONFIG_AMLOGIC_PAGE_TRACE
struct page_trace *old_trace, *new_trace;
short selected_oom_score_adj;
int array_size = ARRAY_SIZE(lowmem_adj);
int other_free = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
- int other_file = global_node_page_state(NR_FILE_PAGES) -
- global_node_page_state(NR_SHMEM) -
- global_node_page_state(NR_UNEVICTABLE) -
- total_swapcache_pages();
#ifdef CONFIG_AMLOGIC_CMA
+ int other_file;
+ struct zone *z = NULL;
+ pg_data_t *pgdat;
int free_cma = 0;
int file_cma = 0;
int cma_forbid = 0;
-
- if (cma_forbidden_mask(sc->gfp_mask) && !current_is_kswapd()) {
+ int zfile = 0;
+ int global_file = global_node_page_state(NR_FILE_PAGES) -
+ global_node_page_state(NR_SHMEM) -
+ global_node_page_state(NR_UNEVICTABLE) -
+ total_swapcache_pages();
+
+ if (gfp_zone(sc->gfp_mask) == ZONE_NORMAL) {
+ /* use zone page state for better accuracy */
+ pgdat = NODE_DATA(sc->nid);
+ z = &pgdat->node_zones[ZONE_NORMAL];
+ if (managed_zone(z)) {
+ zfile = zone_page_state(z, NR_ZONE_INACTIVE_FILE) +
+ zone_page_state(z, NR_ZONE_ACTIVE_FILE);
+ other_file = zfile -
+ global_node_page_state(NR_SHMEM) -
+ zone_page_state(z, NR_ZONE_UNEVICTABLE) -
+ total_swapcache_pages();
+ } else {
+ other_file = global_file;
+ }
+ } else {
+ other_file = global_file;
+ }
+ if (cma_forbidden_mask(sc->gfp_mask) &&
+ (!current_is_kswapd() || cma_alloc_ref())) {
free_cma = global_page_state(NR_FREE_CMA_PAGES);
file_cma = global_page_state(NR_INACTIVE_FILE_CMA) +
global_page_state(NR_ACTIVE_FILE_CMA);
other_file -= file_cma;
cma_forbid = 1;
}
+#else
+ int other_file = global_node_page_state(NR_FILE_PAGES) -
+ global_node_page_state(NR_SHMEM) -
+ global_node_page_state(NR_UNEVICTABLE) -
+ total_swapcache_pages();
#endif /* CONFIG_AMLOGIC_CMA */
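For clarity, a worked example of the other_file arithmetic above when the request targets ZONE_NORMAL and CMA is forbidden. The page counts are made up and the snippet is plain userspace C, only to show the order of the subtractions:

#include <stdio.h>

int main(void)
{
	/* hypothetical counters, in pages */
	long zfile = 30000;	/* NR_ZONE_INACTIVE_FILE + NR_ZONE_ACTIVE_FILE */
	long shmem = 4000;	/* NR_SHMEM */
	long unevict = 500;	/* NR_ZONE_UNEVICTABLE */
	long swapcache = 1500;	/* total_swapcache_pages() */
	long file_cma = 6000;	/* NR_INACTIVE_FILE_CMA + NR_ACTIVE_FILE_CMA */

	long other_file = zfile - shmem - unevict - swapcache;	/* 24000 */
	other_file -= file_cma;		/* 18000 once CMA file pages are excluded */

	printf("other_file = %ld pages\n", other_file);
	return 0;
}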
if (lowmem_adj_size < array_size)
} else {
lowmem_deathpending_timeout = jiffies + HZ;
}
+ if (z)
+ pr_info(" zone:%s, file:%d, shmem:%ld, unevc:%ld, file_cma:%d\n",
+ z->name, zfile,
+ global_node_page_state(NR_SHMEM),
+ zone_page_state(z, NR_ZONE_UNEVICTABLE),
+ file_cma);
#else
lowmem_deathpending_timeout = jiffies + HZ;
#endif /* CONFIG_AMLOGIC_CMA */
#ifdef CONFIG_CMA
/* If allocation can't use CMA areas don't use free CMA pages */
-#ifndef CONFIG_AMLOGIC_CMA /* always sub cma pages to avoid wm all CMA */
if (!(alloc_flags & ALLOC_CMA))
-#endif
free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
#endif
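Dropping the #ifndef CONFIG_AMLOGIC_CMA guard restores the upstream rule: only requests without ALLOC_CMA discount free CMA pages from the watermark check. A small userspace sketch of the arithmetic with made-up numbers (under the old unconditional subtraction even an ALLOC_CMA request saw only 5000 usable pages and failed the 6000-page mark):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical numbers, in pages */
	long free_pages = 20000, free_cma = 15000, mark = 6000;
	bool alloc_cma = true;	/* ALLOC_CMA set: request may use CMA pages */

	long usable = alloc_cma ? free_pages : free_pages - free_cma;
	printf("usable=%ld, watermark %s\n", usable,
	       usable >= mark ? "ok" : "failed");	/* 20000 ok vs 5000 failed */
	return 0;
}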
if (alloc_flags & ALLOC_NO_WATERMARKS)
goto try_this_zone;
+ #ifdef CONFIG_AMLOGIC_MEMORY_EXTEND
+ /* skip watermark checks for atomic allocations made from softirq context */
+ if (in_serving_softirq() && (gfp_mask & __GFP_ATOMIC))
+ goto try_this_zone;
+ #endif
+
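As a hedged illustration (not part of this patch), a typical caller that would take the new try_this_zone shortcut is an atomic allocation issued while a softirq handler runs, since GFP_ATOMIC carries __GFP_ATOMIC and in_serving_softirq() is true there. The function below is hypothetical and only shows the calling pattern; it needs <linux/skbuff.h>:

/* Illustrative only: hypothetical RX-path refill making an atomic
 * allocation from softirq context.
 */
static void rx_refill_sketch(unsigned int len)
{
	struct sk_buff *skb = alloc_skb(len, GFP_ATOMIC);	/* GFP_ATOMIC includes __GFP_ATOMIC */

	if (!skb)
		return;		/* the shortcut makes failure less likely, not impossible */
	kfree_skb(skb);		/* placeholder: a real driver would queue the skb */
}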
if (node_reclaim_mode == 0 ||
!zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
continue;
unsigned long free_pages, free_cma = 0;
struct zoneref *z = ac->preferred_zoneref;
struct zone *zone;
+ unsigned long high_wm;
- if (!(gfp_mask & __GFP_RECLAIM)) /* not allowed */
+ /*
+ * Return early only when the flags neither allow reclaim nor
+ * carry __GFP_ATOMIC: atomic requests still pre-wake kswapd so
+ * that a burst of atomic allocations does not fail within a
+ * very short time window.
+ */
+ if (!(gfp_mask & __GFP_RECLAIM) && !(gfp_mask & __GFP_ATOMIC))
return;
for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
ac->nodemask) {
free_pages = zone_page_state(zone, NR_FREE_PAGES);
#ifdef CONFIG_AMLOGIC_CMA
- if (can_use_cma(gfp_mask))
+ if (!can_use_cma(gfp_mask))
free_cma = zone_page_state(zone, NR_FREE_CMA_PAGES);
#endif /* CONFIG_AMLOGIC_CMA */
free_pages -= free_cma;
* fast reclaim process and can avoid memory becoming too low
* at times
*/
- if (free_pages <= high_wmark_pages(zone))
+ high_wm = high_wmark_pages(zone);
+ if (gfp_mask & __GFP_HIGH) /* 1.5x if __GFP_HIGH */
+ high_wm = ((high_wm * 3) / 2);
+ if (free_pages <= high_wm)
wakeup_kswapd(zone, order, ac->high_zoneidx);
}
}
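A worked example of the boosted wake-up threshold, assuming a hypothetical high watermark of 8192 pages:

unsigned long high_wm = 8192;			/* assume high_wmark_pages(zone) == 8192 */
unsigned long boosted = high_wm * 3 / 2;	/* 12288 for a __GFP_HIGH request */
/* kswapd is woken while free_pages is still as high as 12288, i.e.
 * 4096 pages earlier than with the plain high watermark.
 */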