android: ion: add HPA heap
author    Cho KyongHo <pullip.cho@samsung.com>
          Wed, 21 Mar 2018 12:37:11 +0000 (21:37 +0900)
committer Sangwook Ju <sw.ju@samsung.com>
          Mon, 14 May 2018 10:45:25 +0000 (19:45 +0900)
Introduce the HPA heap, backed by the High-order Pages Allocator (HPA),
which guarantees allocation of physically contiguous chunks of order
higher than 0, i.e. larger than PAGE_SIZE. For example, with 4KiB pages
and the default 64KiB heap alignment, every buffer is built from
physically contiguous 64KiB chunks.

Change-Id: I1ff9c5727509e9e8cdec00469c704d489614bdf2
Signed-off-by: Cho KyongHo <pullip.cho@samsung.com>
drivers/staging/android/ion/Kconfig
drivers/staging/android/ion/Makefile
drivers/staging/android/ion/ion_exynos.h
drivers/staging/android/ion/ion_fdt_exynos.c
drivers/staging/android/ion/ion_hpa_heap.c [new file with mode: 0644]
drivers/staging/android/uapi/ion.h

index 4aed58a9a7dec66f2abcac4a6df4729a40634f1a..cb620d3a8641b15660fb32f5e02a9310a268cf58 100644 (file)
@@ -56,3 +56,11 @@ config ION_CMA_HEAP
          Choose this option to enable CMA heaps with Ion. This heap is backed
          by the Contiguous Memory Allocator (CMA). If your system has these
          regions, you should say Y here.
+
+config ION_HPA_HEAP
+       bool "Ion HPA heap support"
+       depends on ION && HPA
+       help
+         Choose this option to enable HPA heaps with Ion. This heap is backed
+         by the High-order Pages Allocator (HPA). If your system needs
+         buffers built from pages of order higher than 0, you should say
+         Y here.
index 8eeb259771e677075d4f5ad9500c02e9771ed710..1a7c0c815d384eb45bcfa0f675e7d55d6b901572 100644 (file)
@@ -4,5 +4,6 @@ obj-$(CONFIG_ION_SYSTEM_HEAP) += ion_system_heap.o ion_page_pool.o
 obj-$(CONFIG_ION_CARVEOUT_HEAP) += ion_carveout_heap.o
 obj-$(CONFIG_ION_CHUNK_HEAP) += ion_chunk_heap.o
 obj-$(CONFIG_ION_CMA_HEAP) += ion_cma_heap.o
+obj-$(CONFIG_ION_HPA_HEAP) += ion_hpa_heap.o
 obj-$(CONFIG_ION_TEST) += ion_test.o
 obj-$(CONFIG_ION_EXYNOS) += ion_fdt_exynos.o ion_buffer_protect.o ion_exynos.o
index 1d9c9410cdd5d3b6c004867491b866a924aa132e..fc9f5ef16b60f83f0a8a5003f2327241c3e5ba7f 100644 (file)
@@ -59,6 +59,12 @@ extern struct ion_heap *ion_cma_heap_create(struct cma *cma,
 #define ion_cma_heap_create(cma, p) ERR_PTR(-ENODEV)
 #endif
 
+#if defined(CONFIG_ION_HPA_HEAP)
+extern struct ion_heap *ion_hpa_heap_create(struct ion_platform_heap *pheap);
+#else
+#define ion_hpa_heap_create(p) ERR_PTR(-ENODEV)
+#endif
+
 #if defined(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION) && defined(CONFIG_ION_EXYNOS)
 int __init ion_secure_iova_pool_create(void)
 #else
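The stubs above follow the usual kernel convention: when the heap is
configured out, the creator collapses to ERR_PTR(-ENODEV), so callers keep
a single IS_ERR() path for both missing support and runtime failure. A
minimal userspace imitation of the idiom, for illustration only (the
kernel's own definitions live in include/linux/err.h):

/* Userspace sketch of the ERR_PTR/IS_ERR idiom behind the stub above.
 * Illustrative only; not the kernel implementation. */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for a configured-out creator such as ion_hpa_heap_create(). */
static void *heap_create(void)
{
	return ERR_PTR(-ENODEV);
}

int main(void)
{
	void *heap = heap_create();

	if (IS_ERR(heap))	/* one error path: stub or real failure */
		printf("heap unavailable (%ld)\n", (long)heap);

	return 0;
}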
index 1a64e9bb3493148b0d3083d635d388fd788a382d..15b4e85fd084028d86605526990ced83e2f839d9 100644 (file)
@@ -107,10 +107,80 @@ static int __init exynos_ion_reserved_mem_setup(struct reserved_mem *rmem)
 
 RESERVEDMEM_OF_DECLARE(ion, "exynos9820-ion", exynos_ion_reserved_mem_setup);
 
+static bool __init register_hpa_heap(struct device_node *np,
+                                    unsigned int *prot_id_map)
+{
+       struct ion_platform_heap pheap;
+       struct ion_heap *heap;
+       u32 align;
+
+       if (of_property_read_string(np, "ion,heapname", &pheap.name)) {
+               pr_err("%s: failed to read ion,heapname in '%s'\n",
+                      __func__, np->name);
+               return false;
+       }
+
+       pheap.secure = of_property_read_bool(np, "ion,secure");
+
+       if (pheap.secure) {
+               if (of_property_read_u32(np, "ion,protection_id", &pheap.id)) {
+                       pr_err("%s: failed to read ion,protection_id in '%s'\n",
+                              __func__, np->name);
+                       return false;
+               }
+
+               if (pheap.id >= 32) {
+                       pr_err("%s: too large protection id %d of '%s'\n",
+                              __func__, pheap.id, pheap.name);
+                       return false;
+               }
+
+               if ((1 << pheap.id) & *prot_id_map) {
+                       pr_err("%s: protection_id %d in '%s' already exists\n",
+                              __func__, pheap.id, np->name);
+                       return false;
+               }
+       }
+
+       if (!of_property_read_u32(np, "ion,alignment", &align))
+               pheap.align = align;
+       else
+               pheap.align = SZ_64K;
+
+       pheap.type = ION_HEAP_TYPE_HPA;
+       heap = ion_hpa_heap_create(&pheap);
+
+       if (IS_ERR(heap)) {
+               pr_err("%s: failed to register '%s' heap\n",
+                      __func__, pheap.name);
+               return false;
+       }
+
+       if (pheap.secure)
+               *prot_id_map |= 1 << pheap.id;
+
+       ion_device_add_heap(heap);
+       pr_info("ION: registered '%s' heap\n", pheap.name);
+
+       return pheap.secure;
+}
+
+static bool __init exynos_ion_register_hpa_heaps(unsigned int prot_id_map)
+{
+       struct device_node *np, *child;
+       bool secure = false;
+
+       for_each_node_by_name(np, "ion-hpa-heap")
+               for_each_child_of_node(np, child)
+                       if (of_device_is_compatible(child, "exynos9820-ion"))
+                               secure |= register_hpa_heap(child, &prot_id_map);
+
+       return secure;
+}
+
 static int __init exynos_ion_register_heaps(void)
 {
        unsigned int i;
        bool secure = false;
+       unsigned int prot_id_map = 0;
 
        for (i = 0; i < reserved_mem_count; i++) {
                struct ion_platform_heap pheap;
@@ -124,6 +194,18 @@ static int __init exynos_ion_register_heaps(void)
                pheap.secure      = ion_reserved_mem[i].secure;
                pheap.untouchable = ion_reserved_mem[i].untouchable;
 
+               if (pheap.secure && pheap.id >= 32) {
+                       pr_err("%s: too large protection id %d of '%s'\n",
+                              __func__, pheap.id, pheap.name);
+                       continue;
+               }
+
+               if (pheap.secure && ((1 << pheap.id) & prot_id_map)) {
+                       pr_err("%s: protection id %d of '%s' already exists\n",
+                              __func__, pheap.id, pheap.name);
+                       continue;
+               }
+
                if (ion_reserved_mem[i].cma) {
                        pheap.type = ION_HEAP_TYPE_DMA;
                        heap = ion_cma_heap_create(ion_reserved_mem[i].cma,
@@ -139,13 +221,17 @@ static int __init exynos_ion_register_heaps(void)
                        continue;
                }
 
+               if (pheap.secure)
+                       prot_id_map |= 1 << pheap.id;
+
                ion_device_add_heap(heap);
                pr_info("ION: registered '%s' heap\n", pheap.name);
 
-               if (pheap.secure)
-                       secure = true;
+               secure |= pheap.secure;
        }
 
+       secure |= exynos_ion_register_hpa_heaps(prot_id_map);
+
        /*
         * ion_secure_iova_pool_create() should success. If it fails, it is
         * because of design flaw or out of memory. Nothing to do with the
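Both registration paths above share one 32-bit bitmap of protection IDs,
so a secure heap is rejected if its ID collides with any heap registered
earlier, whether reserved-mem or HPA. A standalone sketch of the same
claim/check pattern, in userspace C for illustration:

/* Userspace sketch of the protection-ID bitmap used above: valid IDs
 * are 0..31, one bit each; claiming an ID twice must fail. */
#include <stdbool.h>
#include <stdio.h>

static unsigned int prot_id_map;

static bool claim_prot_id(unsigned int id)
{
	if (id >= 32)			/* would overflow the 32-bit map */
		return false;
	if (prot_id_map & (1U << id))	/* already registered */
		return false;
	prot_id_map |= 1U << id;
	return true;
}

int main(void)
{
	/* prints "1 0 0": first claim wins, repeat and out-of-range fail */
	printf("%d %d %d\n", claim_prot_id(5), claim_prot_id(5),
	       claim_prot_id(32));
	return 0;
}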
diff --git a/drivers/staging/android/ion/ion_hpa_heap.c b/drivers/staging/android/ion/ion_hpa_heap.c
new file mode 100644 (file)
index 0000000..650a1ec
--- /dev/null
@@ -0,0 +1,165 @@
+/*
+ * drivers/staging/android/ion/ion_hpa_heap.c
+ *
+ * Copyright (C) 2016, 2018 Samsung Electronics Co., Ltd.
+ * Author: <pullip.cho@samsung.com> for Exynos SoCs
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/scatterlist.h>
+
+#include <asm/cacheflush.h>
+
+#include "ion.h"
+
+#define ION_HPA_CHUNK_SIZE(heap)  (PAGE_SIZE << (heap)->order)
+#define ION_HPA_PAGE_COUNT(len, heap) \
+       (ALIGN(len, ION_HPA_CHUNK_SIZE(heap)) / ION_HPA_CHUNK_SIZE(heap))
+#define HPA_MAX_CHUNK_COUNT ((PAGE_SIZE * 2) / sizeof(struct page *))
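+/*
+ * Example (illustrative): with 4KiB pages and the default 64KiB alignment
+ * (order 4), each chunk is 64KiB and the chunk array is capped at two
+ * pages, i.e. 1024 entries on 64-bit, so the largest buffer is 64MiB.
+ */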
+
+struct ion_hpa_heap {
+       struct ion_heap heap;
+       unsigned int order;
+       unsigned int protection_id;
+       bool secure;
+};
+
+#define to_hpa_heap(x) container_of(x, struct ion_hpa_heap, heap)
+
+static int ion_hpa_compare_pages(const void *p1, const void *p2)
+{
+       if (*((unsigned long *)p1) > (*((unsigned long *)p2)))
+               return 1;
+       else if (*((unsigned long *)p1) < (*((unsigned long *)p2)))
+               return -1;
+       return 0;
+}
+
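+/*
+ * Allocate 'len' bytes as an array of equally sized high-order chunks
+ * described by an sg_table: the chunks are sorted so the scatterlist is
+ * filled in ascending address order, and each chunk is zeroed and/or
+ * cache-flushed according to the allocation flags before it is handed out.
+ */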
+static int ion_hpa_allocate(struct ion_heap *heap,
+                                struct ion_buffer *buffer, unsigned long len,
+                                unsigned long flags)
+{
+       struct ion_hpa_heap *hpa_heap = to_hpa_heap(heap);
+       unsigned int count = ION_HPA_PAGE_COUNT((unsigned int)len, hpa_heap);
+       bool zero = !(flags & ION_FLAG_NOZEROED);
+       bool cacheflush = !(flags & ION_FLAG_CACHED) ||
+                         ((flags & ION_FLAG_SYNC_FORCE) != 0);
+       size_t desc_size = sizeof(struct page *) * count;
+       struct page **pages;
+       struct sg_table *sgt;
+       struct scatterlist *sg;
+       int ret, i;
+
+       if (count > HPA_MAX_CHUNK_COUNT) {
+               pr_info("ION HPA heap does not allow buffers > %zu\n",
+                       HPA_MAX_CHUNK_COUNT * ION_HPA_CHUNK_SIZE(hpa_heap));
+               return -ENOMEM;
+       }
+
+       pages = kmalloc(desc_size, GFP_KERNEL);
+       if (!pages)
+               return -ENOMEM;
+
+       sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+       if (!sgt) {
+               ret = -ENOMEM;
+               goto err_sgt;
+       }
+
+       ret = sg_alloc_table(sgt, count, GFP_KERNEL);
+       if (ret)
+               goto err_sg;
+
+       ret = alloc_pages_highorder(hpa_heap->order, pages, count);
+       if (ret)
+               goto err_pages;
+
+       sort(pages, count, sizeof(*pages), ion_hpa_compare_pages, NULL);
+
+       for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+               if (zero)
+                       memset(page_address(pages[i]), 0,
+                              ION_HPA_CHUNK_SIZE(hpa_heap));
+               if (cacheflush)
+                       __flush_dcache_area(page_address(pages[i]),
+                                           ION_HPA_CHUNK_SIZE(hpa_heap));
+
+               sg_set_page(sg, pages[i], ION_HPA_CHUNK_SIZE(hpa_heap), 0);
+       }
+
+       kfree(pages);
+
+       buffer->sg_table = sgt;
+
+       return 0;
+err_pages:
+       sg_free_table(sgt);
+err_sg:
+       kfree(sgt);
+err_sgt:
+       kfree(pages);
+
+       return ret;
+}
+
+static void ion_hpa_free(struct ion_buffer *buffer)
+{
+       struct ion_hpa_heap *hpa_heap = to_hpa_heap(buffer->heap);
+       struct sg_table *sgt = buffer->sg_table;
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
+               __free_pages(sg_page(sg), hpa_heap->order);
+       sg_free_table(sgt);
+       kfree(sgt);
+}
+
+static void hpa_heap_query(struct ion_heap *heap, struct ion_heap_data *data)
+{
+       struct ion_hpa_heap *hpa_heap = to_hpa_heap(heap);
+
+       if (hpa_heap->secure)
+               data->heap_flags |= ION_HEAPDATA_FLAGS_ALLOW_PROTECTION;
+}
+
+static struct ion_heap_ops ion_hpa_ops = {
+       .allocate = ion_hpa_allocate,
+       .free = ion_hpa_free,
+       .map_user = ion_heap_map_user,
+       .map_kernel = ion_heap_map_kernel,
+       .unmap_kernel = ion_heap_unmap_kernel,
+       .query_heap = hpa_heap_query,
+};
+
+struct ion_heap *ion_hpa_heap_create(struct ion_platform_heap *data)
+{
+       struct ion_hpa_heap *heap;
+
+       heap = kzalloc(sizeof(*heap), GFP_KERNEL);
+       if (!heap)
+               return ERR_PTR(-ENOMEM);
+
+       heap->heap.ops = &ion_hpa_ops;
+       heap->heap.type = ION_HEAP_TYPE_HPA;
+       heap->heap.name = kstrndup(data->name, MAX_HEAP_NAME - 1, GFP_KERNEL);
+       if (!heap->heap.name) {
+               kfree(heap);
+               return ERR_PTR(-ENOMEM);
+       }
+       heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+       heap->order = get_order(data->align);
+       heap->protection_id = data->id;
+       heap->secure = data->secure;
+
+       return &heap->heap;
+}
index 7c111bbf54ab6e87741b9a9d4924609a0cacc837..0b894f0efc4946c06d08b194cccabb2ab2a08913 100644 (file)
@@ -42,6 +42,7 @@ enum ion_heap_type {
                               * must be last so device specific heaps always
                               * are at the end of this enum
                               */
+       ION_HEAP_TYPE_HPA = ION_HEAP_TYPE_CUSTOM,
 };
 
 #define ION_NUM_HEAP_IDS               (sizeof(unsigned int) * 8)
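Since ION_HEAP_TYPE_HPA aliases ION_HEAP_TYPE_CUSTOM, userspace reaches
this heap by name rather than by type: query the heap list, match the
name given by ion,heapname in the DT, then allocate with that heap's ID
bit set. A minimal sketch, assuming the post-4.12 staging ION UAPI and a
hypothetical heap name "vframe":

/* Minimal userspace sketch: allocate from an HPA heap by name.
 * Assumes the post-4.12 staging ION UAPI (/dev/ion, ION_IOC_HEAP_QUERY,
 * ION_IOC_ALLOC); the heap name "vframe" is a hypothetical example of a
 * DT node's ion,heapname. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "ion.h"	/* the UAPI header this patch extends */

static int hpa_alloc_by_name(int ion_fd, const char *name, __u64 len)
{
	struct ion_heap_query query = { .cnt = 0, .heaps = 0 };
	struct ion_allocation_data alloc = { .len = len };
	struct ion_heap_data *heaps;
	__u32 i;
	int fd = -1;

	/* First pass with heaps == 0 just returns the heap count. */
	if (ioctl(ion_fd, ION_IOC_HEAP_QUERY, &query) < 0)
		return -1;

	heaps = calloc(query.cnt, sizeof(*heaps));
	if (!heaps)
		return -1;

	query.heaps = (__u64)(unsigned long)heaps;
	if (ioctl(ion_fd, ION_IOC_HEAP_QUERY, &query) < 0)
		goto out;

	for (i = 0; i < query.cnt; i++) {
		if (strcmp(heaps[i].name, name))
			continue;
		alloc.heap_id_mask = 1U << heaps[i].heap_id;
		if (!ioctl(ion_fd, ION_IOC_ALLOC, &alloc))
			fd = alloc.fd;	/* dma-buf fd backed by HPA chunks */
		break;
	}
out:
	free(heaps);
	return fd;
}

int main(void)
{
	int ion_fd = open("/dev/ion", O_RDWR);
	int buf_fd;

	if (ion_fd < 0)
		return 1;

	buf_fd = hpa_alloc_by_name(ion_fd, "vframe", 4 << 20);
	if (buf_fd >= 0) {
		printf("allocated 4MiB from 'vframe', dma-buf fd %d\n", buf_fd);
		close(buf_fd);
	}
	close(ion_fd);
	return 0;
}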