mm/hpa: add alloc_pages_highorder_except()
authorCho KyongHo <pullip.cho@samsung.com>
Thu, 3 May 2018 08:09:42 +0000 (17:09 +0900)
committerCosmin Tanislav <demonsingur@gmail.com>
Mon, 22 Apr 2024 17:22:20 +0000 (20:22 +0300)
alloc_pages_highorder() allocates the given number of pages of the
given order. It is a page allocator with a page order restriction,
and is therefore used by drivers for H/W that requires memory
alignment higher than PAGE_SIZE.

Some H/W additionally has memory addressing restrictions: it cannot
access certain ranges of physical memory.
alloc_pages_highorder_except() lets callers describe such
inaccessible areas to HPA so that it does not allocate pages from
those areas for the drivers.
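
A minimal usage sketch (the device constraints, addresses and values
below are made up for illustration; only the new function signature
comes from this patch):

    /*
     * A hypothetical driver needs sixteen 64KiB-aligned chunks
     * (order 4 with 4KiB pages) but cannot address the 256MiB of
     * physical memory starting at 0x80000000.
     */
    struct page *pages[16];
    phys_addr_t exception_areas[][2] = {
            /* { first byte, last byte } of an inaccessible area */
            { 0x80000000, 0x8fffffff },
    };
    int ret;

    ret = alloc_pages_highorder_except(4, pages, 16,
                                       exception_areas, 1);
    if (ret)
            return ret;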

Change-Id: I6c5fe40c7dd5367ea15eb69c7bf68325070dafa4
Signed-off-by: Cho KyongHo <pullip.cho@samsung.com>
include/linux/gfp.h
mm/hpa.c

index 97fb776e897676b983c98ab13f9942320caf2f38..6a79783bb831e059f5f04f2a25f9229a64c6308f 100644 (file)
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -611,12 +611,22 @@ extern void init_cma_reserved_pageblock(struct page *page);
 #endif
 
 #ifdef CONFIG_HPA
-int alloc_pages_highorder(int order, struct page **pages, int nents);
+int alloc_pages_highorder_except(int order, struct page **pages, int nents,
+                                phys_addr_t exception_areas[][2],
+                                int nr_exception);
 #else
-static inline int alloc_pages_highorder(int order, struct page **pages, int nents)
+static inline int alloc_pages_highorder_except(int order,
+                                              struct page **pages, int nents,
+                                              phys_addr_t exception_areas[][2],
+                                              int nr_exception)
 {
-       return 0;
+       return -ENOENT;
 }
 #endif
+static inline int alloc_pages_highorder(int order, struct page **pages,
+                                       int nents)
+{
+       return alloc_pages_highorder_except(order, pages, nents, NULL, 0);
+}
 
 #endif /* __LINUX_GFP_H */
index ca96b3e9d855faba1427179c6883db7d34de7f4b..026c2b21a4f651663f78d6cd15d57b86d846daa3 100644 (file)
--- a/mm/hpa.c
+++ b/mm/hpa.c
@@ -142,14 +142,31 @@ static bool is_movable_chunk(unsigned long pfn, unsigned int order)
        return true;
 }
 
+/*
+ * Return the index of the exception area in @exception_areas that
+ * contains @phys, or -1 if @phys lies outside all of the areas.
+ * Both bounds of an exception area are inclusive.
+ */
+static int get_exception_of_page(phys_addr_t phys,
+                                phys_addr_t exception_areas[][2],
+                                int nr_exception)
+{
+       int i;
+
+       for (i = 0; i < nr_exception; i++)
+               if ((exception_areas[i][0] <= phys) &&
+                   (phys <= exception_areas[i][1]))
+                       return i;
+       return -1;
+}
+
 static int alloc_freepages_range(struct zone *zone, unsigned int order,
-                                struct page **pages, int required)
+                                struct page **pages, int required,
+                                phys_addr_t exception_areas[][2],
+                                int nr_exception)
 
 {
        unsigned int current_order;
        unsigned int mt;
        unsigned long wmark;
        unsigned long flags;
+       LIST_HEAD(isolated);
        struct free_area *area;
        struct page *page;
        int i;
@@ -177,6 +194,14 @@ static int alloc_freepages_range(struct zone *zone, unsigned int order,
                                page = list_entry(area->free_list[mt].next,
                                                        struct page, lru);
                                list_del(&page->lru);
+
+                               /*
+                                * Set aside chunks that start inside an
+                                * exception area; they are put back on
+                                * the free list after the scan.
+                                */
+                               if (get_exception_of_page(page_to_phys(page),
+                                                         exception_areas,
+                                                         nr_exception) >= 0) {
+                                       list_add_tail(&page->lru, &isolated);
+                                       continue;
+                               }
+
                                __ClearPageBuddy(page);
                                set_page_private(page, 0);
                                set_pcppage_migratetype(page, mt);
@@ -196,6 +221,8 @@ static int alloc_freepages_range(struct zone *zone, unsigned int order,
                                        page += 1 << order;
                                }
                        }
+
+                       /* Put the set-aside chunks back on the free list */
+                       list_splice_init(&isolated, &area->free_list[mt]);
                }
        }
 
@@ -214,7 +241,29 @@ static void prep_highorder_pages(unsigned long base_pfn, int order)
                set_page_count(pfn_to_page(pfn), 0);
 }
 
-int alloc_pages_highorder(int order, struct page **pages, int nents)
+/**
+ * alloc_pages_highorder_except() - allocate large order pages
+ * @order:           required page order
+ * @pages:           array to store the allocated @order order pages
+ * @nents:           number of @order order pages to allocate
+ * @exception_areas: memory areas that must not contain any page in @pages
+ * @nr_exception:    number of memory areas in @exception_areas
+ *
+ * Returns 0 on success, an -errno value otherwise.
+ *
+ * Allocates @nents buffers, each consisting of 1 << @order physically
+ * consecutive pages, and stores the page descriptors of the allocated
+ * buffers in @pages. Every page in @pages is also aligned to
+ * PAGE_SIZE << @order.
+ *
+ * If @nr_exception is larger than 0, alloc_pages_highorder_except() does
+ * not allocate pages from the areas described by @exception_areas.
+ * @exception_areas is an array of two-element arrays: the first element is
+ * the start address of an area and the second is its end address. The end
+ * address is the address of the last byte in the area, that is
+ * "[start address] + [size] - 1".
+ */
+int alloc_pages_highorder_except(int order, struct page **pages, int nents,
+                                phys_addr_t exception_areas[][2],
+                                int nr_exception)
 {
        struct zone *zone;
        unsigned int nr_pages = 1 << order;
@@ -231,7 +280,8 @@ retry:
                        continue;
 
                allocated = alloc_freepages_range(zone, order,
-                                       pages + nents - remained, remained);
+                                       pages + nents - remained, remained,
+                                       exception_areas, nr_exception);
                remained -= allocated;
 
                if (remained == 0)
@@ -266,6 +316,14 @@ retry:
                        continue;
                }
 
+               /* Skip candidates whose base page lies in an exception area */
+               ret = get_exception_of_page((phys_addr_t)pfn << PAGE_SHIFT,
+                                           exception_areas, nr_exception);
+               if (ret >= 0) {
+                       /*
+                        * Resume the search from the first page frame past
+                        * the exception area; the loop advances pfn by
+                        * nr_pages before the next check.
+                        */
+                       pfn = (exception_areas[ret][1] + 1) >> PAGE_SHIFT;
+                       pfn -= nr_pages;
+                       continue;
+               }
+
                if (!is_movable_chunk(pfn, order))
                        continue;
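
As the kernel-doc above notes, both bounds of an exception area are
inclusive. A hedged sketch of a helper a caller might use to build an
entry from a { base, size } pair (the helper name is made up and is
not part of this patch):

    /*
     * Convert { base, size } into the inclusive { start, end } pair
     * that alloc_pages_highorder_except() expects.
     */
    static void set_exception_area(phys_addr_t area[2],
                                   phys_addr_t base, size_t size)
    {
            area[0] = base;            /* first byte of the area */
            area[1] = base + size - 1; /* last byte of the area  */
    }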