RESERVEDMEM_OF_DECLARE(ion, "exynos9820-ion", exynos_ion_reserved_mem_setup);
+#define MAX_HPA_EXCEPTION_AREAS 4
+
+static int hpa_num_exception_areas;
+static phys_addr_t hpa_alloc_exceptions[MAX_HPA_EXCEPTION_AREAS][2];
+
static bool __init register_hpa_heap(struct device_node *np,
unsigned int prot_id_map)
{
pheap.align = SZ_64K;
pheap.type = ION_HEAP_TYPE_HPA;
- heap = ion_hpa_heap_create(&pheap);
-
+ heap = ion_hpa_heap_create(&pheap, hpa_alloc_exceptions,
+ hpa_num_exception_areas);
if (IS_ERR(heap)) {
pr_err("%s: failed to register '%s' heap\n",
__func__, pheap.name);
struct device_node *np, *child;
bool secure = false;
- for_each_node_by_name(np, "ion-hpa-heap")
+ for_each_node_by_name(np, "ion-hpa-heap") {
+ const __be32 *range;
+ int len;
+
+ range = of_get_property(np, "ion,hpa_alloc_exception", &len);
+ if (range && (len > 0)) {
+ int n_addr = of_n_addr_cells(np);
+ int n_size = of_n_size_cells(np);
+ int n_area = len / (sizeof(*range) * (n_size + n_addr));
+ const void *prop;
+ phys_addr_t base, size;
+ int i;
+
+ /*
+ * If 'ion-hpa-heap' node defines its own range properties,
+ * override the range properties defined by its parent.
+ */
+ prop = of_get_property(np, "#address-cells", NULL);
+ if (prop)
+ n_addr = be32_to_cpup(prop);
+
+ prop = of_get_property(np, "#size-cells", NULL);
+ if (prop)
+ n_size = be32_to_cpup(prop);
+
+			/*
+			 * Append this node's ranges after the entries already
+			 * collected from previously visited nodes.  Bound the
+			 * loop by both the number of entries in this property
+			 * (n_area) and the capacity of the global table;
+			 * starting at hpa_num_exception_areas with a bound of
+			 * min(n_area, MAX) would under-iterate (or skip the
+			 * node entirely) once earlier nodes have filled slots.
+			 */
+			for (i = hpa_num_exception_areas;
+			     i < min(hpa_num_exception_areas + n_area,
+				     MAX_HPA_EXCEPTION_AREAS); i++) {
+				base = (phys_addr_t)of_read_number(range, n_addr);
+				range += n_addr;
+				size = (phys_addr_t)of_read_number(range, n_size);
+				range += n_size;
+
+				/* store inclusive [start, end] of the forbidden range */
+				hpa_alloc_exceptions[i][0] = base;
+				hpa_alloc_exceptions[i][1] = base + size - 1;
+			}
+
+			hpa_num_exception_areas = i;
+ }
+
for_each_child_of_node(np, child)
if (of_device_is_compatible(child, "exynos9820-ion"))
secure |= register_hpa_heap(child, prot_id_map);
+ }
return secure;
}
return 0;
}
-device_initcall(exynos_ion_register_heaps);
+subsys_initcall(exynos_ion_register_heaps);
struct ion_heap heap;
unsigned int order;
unsigned int protection_id;
+ phys_addr_t (*exception_areas)[2];
+ int exception_count;
bool secure;
};
if (ret)
goto err_sg;
- ret = alloc_pages_highorder(hpa_heap->order, pages, count);
+ i = protected ? hpa_heap->exception_count : 0;
+ ret = alloc_pages_highorder_except(hpa_heap->order, pages, count,
+ hpa_heap->exception_areas, i);
if (ret)
goto err_pages;
.query_heap = hpa_heap_query,
};
-struct ion_heap *ion_hpa_heap_create(struct ion_platform_heap *data)
+struct ion_heap *ion_hpa_heap_create(struct ion_platform_heap *data,
+ phys_addr_t except_areas[][2],
+ int n_except_areas)
{
struct ion_hpa_heap *heap;
heap->order = get_order(data->align);
heap->protection_id = data->id;
heap->secure = data->secure;
-
+ heap->exception_areas = except_areas;
+ heap->exception_count = n_except_areas;
return &heap->heap;
}