RESERVEDMEM_OF_DECLARE(ion, "exynos9820-ion", exynos_ion_reserved_mem_setup);
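+/*
+ * Parse one "ion-hpa-heap" child node and register the HPA heap it
+ * describes. The property names below are the ones this function reads;
+ * the node layout and the values are only an illustrative sketch, not
+ * copied from a real dts:
+ *
+ *	ion-hpa-heap {
+ *		example_heap {
+ *			compatible = "exynos9820-ion";
+ *			ion,heapname = "example_heap";
+ *			ion,secure;
+ *			ion,protection_id = <2>;
+ *			ion,alignment = <0x10000>;
+ *		};
+ *	};
+ *
+ * Returns true only when the registered heap is a secure heap.
+ */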
+static bool __init register_hpa_heap(struct device_node *np,
+ unsigned int prot_id_map)
+{
+ struct ion_platform_heap pheap;
+ struct ion_heap *heap;
+ u32 align;
+
+ if (of_property_read_string(np, "ion,heapname", &pheap.name)) {
+ pr_err("%s: failed to read ion,heapname in '%s'\n",
+ __func__, np->name);
+ return false;
+ }
+
+ pheap.secure = of_property_read_bool(np, "ion,secure");
+
+ if (pheap.secure) {
+ if (of_property_read_u32(np, "ion,protection_id", &pheap.id)) {
+ pr_err("%s: failed to read ion,protection_id in '%s'\n",
+ __func__, np->name);
+ return false;
+ }
+
+ if (pheap.id >= 32) {
+ pr_err("%s: too large protection id %d of '%s'\n",
+ __func__, pheap.id, pheap.name);
+ return false;
+ }
+
+ if ((1 << pheap.id) & prot_id_map) {
+ pr_err("%s: protection_id %d in '%s' already exists\n",
+ __func__, pheap.id, np->name);
+ return false;
+ }
+ }
+
+ if (!of_property_read_u32(np, "ion,alignment", &align))
+ pheap.align = align;
+ else
+ pheap.align = SZ_64K;
+
+ pheap.type = ION_HEAP_TYPE_HPA;
+ heap = ion_hpa_heap_create(&pheap);
+ if (IS_ERR(heap)) {
+ pr_err("%s: failed to register '%s' heap\n",
+ __func__, pheap.name);
+ return false;
+ }
+
+ ion_device_add_heap(heap);
+ pr_info("ION: registered '%s' heap\n", pheap.name);
+
+ return pheap.secure;
+}
+
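+/*
+ * Register an HPA heap for every "exynos9820-ion" compatible child of the
+ * "ion-hpa-heap" nodes. @prot_id_map holds the protection ids already taken
+ * by the reserved-memory heaps so that duplicates are rejected. Returns true
+ * if at least one secure heap was registered.
+ */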
+static bool __init exynos_ion_register_hpa_heaps(unsigned int prot_id_map)
+{
+ struct device_node *np, *child;
+ bool secure = false;
+
+ for_each_node_by_name(np, "ion-hpa-heap")
+ for_each_child_of_node(np, child)
+ if (of_device_is_compatible(child, "exynos9820-ion"))
+ secure |= register_hpa_heap(child, prot_id_map);
+
+ return secure;
+}
+
static int __init exynos_ion_register_heaps(void)
{
unsigned int i;
bool secure = false;
+ unsigned int prot_id_map = 0;
for (i = 0; i < reserved_mem_count; i++) {
struct ion_platform_heap pheap;
pheap.secure = ion_reserved_mem[i].secure;
pheap.untouchable = ion_reserved_mem[i].untouchable;
+ if (pheap.id >= 32) {
+ pr_err("%s: too large protection id %d of '%s'\n",
+ __func__, pheap.id, pheap.name);
+ continue;
+ }
+
+ if (pheap.secure && ((1 << pheap.id) & prot_id_map)) {
+ pr_err("%s: protection id %d of '%s' already exists\n",
+ __func__, pheap.id, pheap.name);
+ continue;
+ }
+
if (ion_reserved_mem[i].cma) {
pheap.type = ION_HEAP_TYPE_DMA;
heap = ion_cma_heap_create(ion_reserved_mem[i].cma,
continue;
}
+ if (pheap.secure)
+ prot_id_map |= 1 << pheap.id;
+
ion_device_add_heap(heap);
pr_info("ION: registered '%s' heap\n", pheap.name);
- if (pheap.secure)
- secure = true;
+ secure |= pheap.secure;
}
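+ /*
+ * HPA heaps allocate from the page allocator at runtime, so they are
+ * described by their own device tree nodes instead of reserved-memory
+ * regions.
+ */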
+ secure |= exynos_ion_register_hpa_heaps(prot_id_map);
+
/*
* ion_secure_iova_pool_create() should success. If it fails, it is
* because of design flaw or out of memory. Nothing to do with the
--- /dev/null
+++ b/drivers/staging/android/ion/ion_hpa_heap.c
+/*
+ * drivers/staging/android/ion/ion_hpa_heap.c
+ *
+ * Copyright (C) 2016, 2018 Samsung Electronics Co., Ltd.
+ * Author: <pullip.cho@samsung.com> for Exynos SoCs
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/scatterlist.h>
+
+#include <asm/cacheflush.h>
+
+#include "ion.h"
+
+#define ION_HPA_CHUNK_SIZE(heap) (PAGE_SIZE << (heap)->order)
+#define ION_HPA_PAGE_COUNT(len, heap) \
+ (ALIGN(len, ION_HPA_CHUNK_SIZE(heap)) / ION_HPA_CHUNK_SIZE(heap))
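+/*
+ * The temporary array of chunk pointers used during allocation is limited to
+ * two pages, which caps the number of chunks in a single buffer.
+ */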
+#define HPA_MAX_CHUNK_COUNT ((PAGE_SIZE * 2) / sizeof(struct page *))
+
+struct ion_hpa_heap {
+ struct ion_heap heap;
+ unsigned int order;
+ unsigned int protection_id;
+ bool secure;
+};
+
+#define to_hpa_heap(x) container_of(x, struct ion_hpa_heap, heap)
+
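+/* sort() comparator: order chunks by ascending struct page pointer */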
+static int ion_hpa_compare_pages(const void *p1, const void *p2)
+{
+ if (*((unsigned long *)p1) > (*((unsigned long *)p2)))
+ return 1;
+ else if (*((unsigned long *)p1) < (*((unsigned long *)p2)))
+ return -1;
+ return 0;
+}
+
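+/*
+ * Allocate @len bytes as high-order chunks. The chunks are sorted by
+ * address, optionally zeroed and cache-flushed according to @flags, and
+ * handed to the buffer as an sg_table with one chunk per entry.
+ */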
+static int ion_hpa_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer, unsigned long len,
+ unsigned long flags)
+{
+ struct ion_hpa_heap *hpa_heap = to_hpa_heap(heap);
+ unsigned int count = ION_HPA_PAGE_COUNT((unsigned int)len, hpa_heap);
+ bool zero = !(flags & ION_FLAG_NOZEROED);
+ bool cacheflush = !(flags & ION_FLAG_CACHED) ||
+ ((flags & ION_FLAG_SYNC_FORCE) != 0);
+ size_t desc_size = sizeof(struct page *) * count;
+ struct page **pages;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ int ret, i;
+
+ if (count > HPA_MAX_CHUNK_COUNT) {
+ pr_info("ION HPA heap does not allow buffers > %zu\n",
+ HPA_MAX_CHUNK_COUNT * ION_HPA_CHUNK_SIZE(hpa_heap));
+ return -ENOMEM;
+ }
+
+ pages = kmalloc(desc_size, GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ ret = -ENOMEM;
+ goto err_sgt;
+ }
+
+ ret = sg_alloc_table(sgt, count, GFP_KERNEL);
+ if (ret)
+ goto err_sg;
+
+ ret = alloc_pages_highorder(hpa_heap->order, pages, count);
+ if (ret)
+ goto err_pages;
+
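+ /* list the chunks in the sg_table in ascending address order */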
+ sort(pages, count, sizeof(*pages), ion_hpa_compare_pages, NULL);
+
+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+ if (zero)
+ memset(page_address(pages[i]), 0,
+ ION_HPA_CHUNK_SIZE(hpa_heap));
+ if (cacheflush)
+ __flush_dcache_area(page_address(pages[i]),
+ ION_HPA_CHUNK_SIZE(hpa_heap));
+
+ sg_set_page(sg, pages[i], ION_HPA_CHUNK_SIZE(hpa_heap), 0);
+ }
+
+ kfree(pages);
+
+ buffer->sg_table = sgt;
+
+ return 0;
+err_pages:
+ sg_free_table(sgt);
+err_sg:
+ kfree(sgt);
+err_sgt:
+ kfree(pages);
+
+ return ret;
+}
+
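+/* Release every high-order chunk referenced by the buffer's sg_table */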
+static void ion_hpa_free(struct ion_buffer *buffer)
+{
+ struct ion_hpa_heap *hpa_heap = to_hpa_heap(buffer->heap);
+ struct sg_table *sgt = buffer->sg_table;
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
+ __free_pages(sg_page(sg), hpa_heap->order);
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+
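+/* Advertise buffer protection support for secure HPA heaps */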
+static void hpa_heap_query(struct ion_heap *heap, struct ion_heap_data *data)
+{
+ struct ion_hpa_heap *hpa_heap = to_hpa_heap(heap);
+
+ if (hpa_heap->secure)
+ data->heap_flags |= ION_HEAPDATA_FLAGS_ALLOW_PROTECTION;
+}
+
+static struct ion_heap_ops ion_hpa_ops = {
+ .allocate = ion_hpa_allocate,
+ .free = ion_hpa_free,
+ .map_user = ion_heap_map_user,
+ .map_kernel = ion_heap_map_kernel,
+ .unmap_kernel = ion_heap_unmap_kernel,
+ .query_heap = hpa_heap_query,
+};
+
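+/* Instantiate an HPA heap from the platform heap description built from DT */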
+struct ion_heap *ion_hpa_heap_create(struct ion_platform_heap *data)
+{
+ struct ion_hpa_heap *heap;
+
+ heap = kzalloc(sizeof(*heap), GFP_KERNEL);
+ if (!heap)
+ return ERR_PTR(-ENOMEM);
+
+ heap->heap.ops = &ion_hpa_ops;
+ heap->heap.type = ION_HEAP_TYPE_HPA;
+ heap->heap.name = kstrndup(data->name, MAX_HEAP_NAME - 1, GFP_KERNEL);
+ if (!heap->heap.name) {
+ kfree(heap);
+ return ERR_PTR(-ENOMEM);
+ }
+ heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+ heap->order = get_order(data->align);
+ heap->protection_id = data->id;
+ heap->secure = data->secure;
+
+ return &heap->heap;
+}