ion_heap_data lacks heap properties that are critical to users.
Users may want to know the total size of the CMA/carveout memory
pools served by the cma heap and the carveout heap, respectively.
They also need to know whether a heap provides buffer protection
for DRM video streams, or whether buffers allocated from a heap
can be accessed at all.
Change-Id: I3c3a6b9a41d32398ddfe1b1c21b3977e790e5c25
Signed-off-by: Cho KyongHo <pullip.cho@samsung.com>
struct ion_heap *heap;
struct ion_heap_data hdata;
- memset(&hdata, 0, sizeof(hdata));
-
down_read(&dev->lock);
if (!buffer) {
query->cnt = dev->heap_cnt;
max_cnt = query->cnt;
plist_for_each_entry(heap, &dev->heaps, node) {
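+ /* clear stale per-heap data so size/heap_flags don't leak between heaps */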
+ memset(&hdata, 0, sizeof(hdata));
+
strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
hdata.name[sizeof(hdata.name) - 1] = '\0';
hdata.type = heap->type;
hdata.heap_id = heap->id;
+ if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+ hdata.heap_flags = ION_HEAPDATA_FLAGS_DEFER_FREE;
+
+ if (heap->ops->query_heap)
+ heap->ops->query_heap(heap, &hdata);
+
if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
ret = -EFAULT;
goto out;
* @map_kernel map memory to the kernel
* @unmap_kernel unmap memory from the kernel
* @map_user map memory to userspace
+ * @query_heap fill heap-specific data into ion_heap_data for users
*
* allocate, phys, and map_user return 0 on success, -errno on error.
* map_dma and map_kernel return pointer on success, ERR_PTR on
* the buffer's private_flags when called from a shrinker. In that
* case, the pages being free'd must be truly free'd back to the
* system, not put in a page pool or otherwise cached.
+ * @query_heap is optional.
*/
struct ion_heap_ops {
int (*allocate)(struct ion_heap *heap,
int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
struct vm_area_struct *vma);
int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
+ void (*query_heap)(struct ion_heap *heap, struct ion_heap_data *data);
};
/**
return ion_heap_map_kernel(heap, buffer);
}
+static void carveout_heap_query(struct ion_heap *heap,
+ struct ion_heap_data *data)
+{
+ struct ion_carveout_heap *carveout_heap =
+ container_of(heap, struct ion_carveout_heap, heap);
+
+ data->size = carveout_heap->size;
+ if (carveout_heap->secure)
+ data->heap_flags |= ION_HEAPDATA_FLAGS_ALLOW_PROTECTION;
+ if (carveout_heap->untouchable)
+ data->heap_flags |= ION_HEAPDATA_FLAGS_UNTOUCHABLE;
+}
+
static struct ion_heap_ops carveout_heap_ops = {
.allocate = ion_carveout_heap_allocate,
.free = ion_carveout_heap_free,
.map_user = carveout_heap_map_user,
.map_kernel = carveout_heap_map_kernel,
.unmap_kernel = ion_heap_unmap_kernel,
+ .query_heap = carveout_heap_query,
};
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
kfree(buffer->sg_table);
}
+static void cma_heap_query(struct ion_heap *heap, struct ion_heap_data *data)
+{
+ struct ion_cma_heap *cma_heap = to_cma_heap(heap);
+
+ data->size = (u32)cma_get_size(cma_heap->cma);
+ if (cma_heap->secure)
+ data->heap_flags |= ION_HEAPDATA_FLAGS_ALLOW_PROTECTION;
+}
+
static struct ion_heap_ops ion_cma_ops = {
.allocate = ion_cma_allocate,
.free = ion_cma_free,
.map_user = ion_heap_map_user,
.map_kernel = ion_heap_map_kernel,
.unmap_kernel = ion_heap_unmap_kernel,
+ .query_heap = cma_heap_query,
};
#ifdef CONFIG_ION_EXYNOS
#define MAX_HEAP_NAME 32
+/**
+ * buffers freed back to the heap may not be returned to the free pool
+ * immediately
+ */
+#define ION_HEAPDATA_FLAGS_DEFER_FREE 1
+/**
+ * ION_FLAG_PROTECTED is applicable: the heap can allocate buffers protected
+ * for DRM video streams.
+ */
+#define ION_HEAPDATA_FLAGS_ALLOW_PROTECTION 2
+/**
+ * buffers from this heap cannot be accessed from either userland or kernel
+ * space; mmap() and dmabuf kmap/vmap always fail.
+ */
+#define ION_HEAPDATA_FLAGS_UNTOUCHABLE 4
/**
* struct ion_heap_data - data about a heap
* @name - first 32 characters of the heap name
* @type - heap type
* @heap_id - heap id for the heap
+ * @size - size of the memory pool if the heap type is dma, carveout or chunk
+ * @heap_flags - properties of the heap
*/
struct ion_heap_data {
char name[MAX_HEAP_NAME];
__u32 type;
__u32 heap_id;
- __u32 reserved0;
- __u32 reserved1;
+ __u32 size; /* reserved0 */
+ __u32 heap_flags; /* reserved1 */
__u32 reserved2;
};
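
For reference, a minimal userspace sketch of how the new fields could be
consumed, assuming the standard ION character device (/dev/ion) and the
ION_IOC_HEAP_QUERY ioctl from the ION uapi header (header location varies;
staging kernels keep it under drivers/staging/android/uapi/ion.h). The size
and heap_flags fields and the ION_HEAPDATA_FLAGS_* names come from this
patch; the printout format is illustrative only:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ion.h>	/* ION uapi header including the fields added above */

int main(void)
{
	struct ion_heap_query query = { 0 };
	struct ion_heap_data *heaps = NULL;
	unsigned int i;
	int fd, ret = 1;

	fd = open("/dev/ion", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/ion");
		return 1;
	}

	/* First call with heaps == 0 only reports the number of heaps. */
	if (ioctl(fd, ION_IOC_HEAP_QUERY, &query) < 0) {
		perror("ION_IOC_HEAP_QUERY (count)");
		goto out;
	}

	heaps = calloc(query.cnt, sizeof(*heaps));
	if (!heaps)
		goto out;

	/* Second call fills one ion_heap_data per heap. */
	query.heaps = (uintptr_t)heaps;
	if (ioctl(fd, ION_IOC_HEAP_QUERY, &query) < 0) {
		perror("ION_IOC_HEAP_QUERY (fill)");
		goto out;
	}

	for (i = 0; i < query.cnt; i++)
		printf("%-32s id=%u type=%u size=%u%s%s%s\n",
		       heaps[i].name, heaps[i].heap_id, heaps[i].type,
		       heaps[i].size,
		       heaps[i].heap_flags & ION_HEAPDATA_FLAGS_DEFER_FREE ?
				" defer-free" : "",
		       heaps[i].heap_flags & ION_HEAPDATA_FLAGS_ALLOW_PROTECTION ?
				" protectable" : "",
		       heaps[i].heap_flags & ION_HEAPDATA_FLAGS_UNTOUCHABLE ?
				" untouchable" : "");

	ret = 0;
out:
	free(heaps);
	close(fd);
	return ret;
}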