From fda33f1cd6c1707f1c08eb9e8cda8263298a1a23 Mon Sep 17 00:00:00 2001 From: Cho KyongHo Date: Thu, 3 May 2018 17:09:42 +0900 Subject: [PATCH] mm/hpa: add alloc_pages_highorder_except() alloc_pages_highorder() allocates a given number of pages of a given order. It is a page allocator with a restriction of page order. Therefore, it is used by the drivers of H/Ws with a requirement of higher memory alignment than PAGE_SIZE. Moreover, some H/Ws have an additional restriction on memory addressing: they cannot access some ranges of memory. alloc_pages_highorder_except() provides a way for HPA to avoid inaccessible memory areas so that inappropriate pages are not allocated to the drivers. Change-Id: I6c5fe40c7dd5367ea15eb69c7bf68325070dafa4 Signed-off-by: Cho KyongHo --- include/linux/gfp.h | 16 +++++++++--- mm/hpa.c | 64 ++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 74 insertions(+), 6 deletions(-) diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 97fb776e8976..6a79783bb831 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -611,12 +611,22 @@ extern void init_cma_reserved_pageblock(struct page *page); #endif #ifdef CONFIG_HPA -int alloc_pages_highorder(int order, struct page **pages, int nents); +int alloc_pages_highorder_except(int order, struct page **pages, int nents, + phys_addr_t exception_areas[][2], + int nr_exception); #else -static inline int alloc_pages_highorder(int order, struct page **pages, int nents) +static inline int alloc_pages_highorder_except(int order, + struct page **pages, int nents, + phys_addr_t exception_areas[][2], + int nr_exception) { - return 0; + return -ENOENT; } #endif +static inline int alloc_pages_highorder(int order, struct page **pages, + int nents) +{ + return alloc_pages_highorder_except(order, pages, nents, NULL, 0); +} #endif /* __LINUX_GFP_H */ diff --git a/mm/hpa.c b/mm/hpa.c index ca96b3e9d855..026c2b21a4f6 100644 --- a/mm/hpa.c +++ b/mm/hpa.c @@ -142,14 +142,31 @@ static bool is_movable_chunk(unsigned
long pfn, unsigned int order) return true; } +static int get_exception_of_page(phys_addr_t phys, + phys_addr_t exception_areas[][2], + int nr_exception) +{ + int i; + + for (i = 0; i < nr_exception; i++) + if ((exception_areas[i][0] <= phys) && + (phys <= exception_areas[i][1])) + return i; + return -1; + +} + static int alloc_freepages_range(struct zone *zone, unsigned int order, - struct page **pages, int required) + struct page **pages, int required, + phys_addr_t exception_areas[][2], + int nr_exception) { unsigned int current_order; unsigned int mt; unsigned long wmark; unsigned long flags; + LIST_HEAD(isolated); struct free_area *area; struct page *page; int i; @@ -177,6 +194,14 @@ static int alloc_freepages_range(struct zone *zone, unsigned int order, page = list_entry(area->free_list[mt].next, struct page, lru); list_del(&page->lru); + + if (get_exception_of_page(page_to_phys(page), + exception_areas, + nr_exception) >= 0) { + list_add_tail(&page->lru, &isolated); + continue; + } + __ClearPageBuddy(page); set_page_private(page, 0); set_pcppage_migratetype(page, mt); @@ -196,6 +221,8 @@ static int alloc_freepages_range(struct zone *zone, unsigned int order, page += 1 << order; } } + + list_splice_init(&isolated, &area->free_list[mt]); } } @@ -214,7 +241,29 @@ static void prep_highorder_pages(unsigned long base_pfn, int order) set_page_count(pfn_to_page(pfn), 0); } -int alloc_pages_highorder(int order, struct page **pages, int nents) +/** + * alloc_pages_highorder_except() - allocate large order pages + * @order: required page order + * @pages: array to store allocated @order order pages + * @nents: number of @order order pages + * @exception_areas: memory areas that should not include pages in @pages + * @nr_exception: number of memory areas in @exception_areas + * + * Returns 0 on allocation success. -error otherwise. 
+ * + * Allocates @nents chunks of PAGE_SIZE << @order consecutive pages + * and stores the page descriptors of the allocated pages to @pages. Every page + * in @pages should also be aligned by PAGE_SIZE << @order. + * + * If @nr_exception is larger than 0, alloc_pages_highorder_except() does not + * allocate pages in the areas described in @exception_areas. @exception_areas + * is an array of two-element arrays: The first element is the start + * address of an area and the last element is the end address. The end address + * is the last byte address in the area, that is "[start address] + [size] - 1". + */ +int alloc_pages_highorder_except(int order, struct page **pages, int nents, + phys_addr_t exception_areas[][2], + int nr_exception) { struct zone *zone; unsigned int nr_pages = 1 << order; @@ -231,7 +280,8 @@ retry: continue; allocated = alloc_freepages_range(zone, order, - pages + nents - remained, remained); + pages + nents - remained, remained, + exception_areas, nr_exception); remained -= allocated; if (remained == 0) @@ -266,6 +316,14 @@ retry: continue; } + ret = get_exception_of_page(pfn << PAGE_SHIFT, + exception_areas, nr_exception); + if (ret >= 0) { + pfn = (exception_areas[ret][1] + 1) >> PAGE_SHIFT; + pfn -= nr_pages; + continue; + } + if (!is_movable_chunk(pfn, order)) continue; -- 2.20.1