From: Cho KyongHo
Date: Wed, 19 Apr 2017 14:57:19 +0000 (+0900)
Subject: [COMMON] mm: hpa/cma: introduce alloc_contig_range_fast()
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=1977d9f20b463702a229725683a881728cb752b0;p=GitHub%2FLineageOS%2Fandroid_kernel_motorola_exynos9610.git

[COMMON] mm: hpa/cma: introduce alloc_contig_range_fast()

HPA allocates many order-4 pages with alloc_contig_range(), but most of
that time is spent flushing the per-cpu LRU lists and the per-cpu free
pages, which HPA does not require.

alloc_contig_range_fast() skips the per-cpu flushes, so it may fail to
reclaim the given range of pages. Users that need reclamation of the
given range to be guaranteed can keep calling alloc_contig_range(); its
behavior is unchanged.

Change-Id: Ie9d545132c7450713ff7724099d24a2ffd766cb1
Signed-off-by: Cho KyongHo
---
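A caller that wants the cheap path but still needs the allocation to
succeed can retry with the draining variant. A minimal sketch of that
fallback pattern (the helper name and fallback policy are illustrative
only, not part of this patch):

#include <linux/gfp.h>

/*
 * Illustrative only: try the fast variant first; if the range cannot
 * be reclaimed without flushing the per-cpu LRU and free pages, fall
 * back to alloc_contig_range(), whose behavior is unchanged.
 */
static int alloc_contig_range_try_fast(unsigned long start,
				       unsigned long end,
				       unsigned migratetype)
{
	int ret;

	ret = alloc_contig_range_fast(start, end, migratetype);
	if (ret)	/* typically -EBUSY while per-cpu pages pin the range */
		ret = alloc_contig_range(start, end, migratetype,
					 GFP_KERNEL);
	return ret;
}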
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 16cf067bcbe7..e333fdbc3d7a 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -577,6 +577,8 @@ static inline bool pm_suspended_storage(void)
 /* The below functions must be run on a range from a single zone. */
 extern int alloc_contig_range(unsigned long start, unsigned long end,
 			      unsigned migratetype, gfp_t gfp_mask);
+extern int alloc_contig_range_fast(unsigned long start, unsigned long end,
+				   unsigned migratetype);
 extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
 
 #endif
diff --git a/mm/hpa.c b/mm/hpa.c
index 7aa7a0bf8e4a..b7ba5972bee8 100644
--- a/mm/hpa.c
+++ b/mm/hpa.c
@@ -277,7 +277,7 @@ retry:
 		if (!is_movable_chunk(pfn, order))
 			continue;
 
-		ret = alloc_contig_range(pfn, pfn + nr_pages,
+		ret = alloc_contig_range_fast(pfn, pfn + nr_pages,
 				get_pageblock_migratetype(pfn_to_page(pfn)));
 		if (ret == 0)
 			prep_highorder_pages(pfn, order);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a200d3a3e589..a8b20ffa3b35 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7433,7 +7433,8 @@ static unsigned long pfn_max_align_up(unsigned long pfn)
 
 /* [start, end) must belong to a single zone. */
 static int __alloc_contig_migrate_range(struct compact_control *cc,
-					unsigned long start, unsigned long end)
+					unsigned long start, unsigned long end,
+					bool drain)
 {
 	/* This function is based on compact_zone() from compaction.c. */
 	unsigned long nr_reclaimed;
@@ -7441,7 +7442,8 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
 	unsigned int tries = 0;
 	int ret = 0;
 
-	migrate_prep();
+	if (drain)
+		migrate_prep();
 
 	while (pfn < end || !list_empty(&cc->migratepages)) {
 		if (fatal_signal_pending(current)) {
@@ -7497,8 +7499,8 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
  * pages which PFN is in [start, end) are allocated for the caller and
  * need to be freed with free_contig_range().
  */
-int alloc_contig_range(unsigned long start, unsigned long end,
-		       unsigned migratetype, gfp_t gfp_mask)
+int __alloc_contig_range(unsigned long start, unsigned long end,
+			 unsigned migratetype, gfp_t gfp_mask, bool drain)
 {
 	unsigned long outer_start, outer_end;
 	unsigned int order;
@@ -7554,7 +7556,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 	 * allocated. So, if we fall through be sure to clear ret so that
 	 * -EBUSY is not accidentally used or returned to caller.
 	 */
-	ret = __alloc_contig_migrate_range(&cc, start, end);
+	ret = __alloc_contig_migrate_range(&cc, start, end, drain);
 	if (ret && ret != -EBUSY)
 		goto done;
 	ret =0;
@@ -7576,38 +7578,41 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 	 * isolated thus they won't get removed from buddy.
 	 */
 
-	lru_add_drain_all();
-	drain_all_pages(cc.zone);
-
 	order = 0;
 	outer_start = start;
-	while (!PageBuddy(pfn_to_page(outer_start))) {
-		if (++order >= MAX_ORDER) {
-			outer_start = start;
-			break;
+
+	if (drain) {
+		lru_add_drain_all();
+		drain_all_pages(cc.zone);
+
+		while (!PageBuddy(pfn_to_page(outer_start))) {
+			if (++order >= MAX_ORDER) {
+				outer_start = start;
+				break;
+			}
+			outer_start &= ~0UL << order;
 		}
-		outer_start &= ~0UL << order;
-	}
 
-	if (outer_start != start) {
-		order = page_order(pfn_to_page(outer_start));
+		if (outer_start != start) {
+			order = page_order(pfn_to_page(outer_start));
 
-		/*
-		 * outer_start page could be small order buddy page and
-		 * it doesn't include start page. Adjust outer_start
-		 * in this case to report failed page properly
-		 * on tracepoint in test_pages_isolated()
-		 */
-		if (outer_start + (1UL << order) <= start)
-			outer_start = start;
-	}
+			/*
+			 * outer_start page could be small order buddy page and
+			 * it doesn't include start page. Adjust outer_start
+			 * in this case to report failed page properly
+			 * on tracepoint in test_pages_isolated()
+			 */
+			if (outer_start + (1UL << order) <= start)
+				outer_start = start;
+		}
 
-	/* Make sure the range is really isolated. */
-	if (test_pages_isolated(outer_start, end, false)) {
-		pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
-			__func__, outer_start, end);
-		ret = -EBUSY;
-		goto done;
+		/* Make sure the range is really isolated. */
+		if (test_pages_isolated(outer_start, end, false)) {
+			pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
+				__func__, outer_start, end);
+			ret = -EBUSY;
+			goto done;
+		}
 	}
 
 	/* Grab isolated pages from freelists. */
@@ -7629,6 +7634,18 @@ done:
 	return ret;
 }
 
+int alloc_contig_range(unsigned long start, unsigned long end,
+		       unsigned migratetype, gfp_t gfp_mask)
+{
+	return __alloc_contig_range(start, end, migratetype, gfp_mask, true);
+}
+
+int alloc_contig_range_fast(unsigned long start, unsigned long end,
+			    unsigned migratetype)
+{
+	return __alloc_contig_range(start, end, migratetype, GFP_KERNEL, false);
+}
+
 void free_contig_range(unsigned long pfn, unsigned nr_pages)
 {
 	unsigned int count = 0;
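
The result is that alloc_contig_range() keeps its old draining behavior
as a thin wrapper around __alloc_contig_range(), while HPA opts into the
cheaper path. For reference, a sketch of how a caller would pair the
fast variant with free_contig_range(), mirroring the hpa.c loop above
(hpa_claim_chunk()/hpa_release_chunk() are hypothetical names; the PFN
range must lie within a single zone):

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Illustrative only: claim one order-N chunk at a known PFN with the
 * fast variant. No per-cpu draining is done, so this can return NULL
 * in cases where the draining path would have succeeded.
 */
static struct page *hpa_claim_chunk(unsigned long pfn, unsigned int order)
{
	unsigned long nr_pages = 1UL << order;

	if (alloc_contig_range_fast(pfn, pfn + nr_pages,
			get_pageblock_migratetype(pfn_to_page(pfn))))
		return NULL;

	return pfn_to_page(pfn);
}

static void hpa_release_chunk(struct page *page, unsigned int order)
{
	free_contig_range(page_to_pfn(page), 1U << order);
}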