If you're not using Android, it's probably safe to
say N here.
+config ION_SYSTEM_HEAP
+ bool "Ion system heap"
+ depends on ION
+ help
+ Choose this option to enable the Ion system heap. The system heap
+ is backed by pages from the buddy allocator. If in doubt, say Y.
+
+config ION_CARVEOUT_HEAP
+ bool "Ion carveout heap support"
+ depends on ION
+ help
+ Choose this option to enable carveout heaps with Ion. Carveout heaps
+ are backed by memory reserved from the system. Allocations are
+ typically faster, at the cost of reserving memory that may otherwise
+ go unused. Unless you know your system has these regions, you should
+ say N here.
+
+config ION_CHUNK_HEAP
+ bool "Ion chunk heap support"
+ depends on ION
+ help
+ Choose this option to enable chunk heaps with Ion. This heap is
+ similar in function to the carveout heap, but memory is broken down
+ into smaller chunk sizes, typically corresponding to a TLB size.
+ Unless you know your system has these regions, you should say N here.
+
config ION_CMA_HEAP
bool "Ion CMA heap support"
depends on ION && CMA
-obj-$(CONFIG_ION) += ion.o ion-ioctl.o ion_heap.o \
- ion_page_pool.o ion_system_heap.o \
- ion_carveout_heap.o ion_chunk_heap.o
+obj-$(CONFIG_ION) += ion.o ion-ioctl.o ion_heap.o
+obj-$(CONFIG_ION_SYSTEM_HEAP) += ion_system_heap.o ion_page_pool.o
+obj-$(CONFIG_ION_CARVEOUT_HEAP) += ion_carveout_heap.o
+obj-$(CONFIG_ION_CHUNK_HEAP) += ion_chunk_heap.o
obj-$(CONFIG_ION_CMA_HEAP) += ion_cma_heap.o
#include "ion.h"
+static struct ion_device *internal_dev;
+static int heap_id;
+
bool ion_buffer_cached(struct ion_buffer *buffer)
{
return !!(buffer->flags & ION_FLAG_CACHED);
DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
debug_shrink_set, "%llu\n");
-void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
+void ion_device_add_heap(struct ion_heap *heap)
{
struct dentry *debug_file;
+ struct ion_device *dev = internal_dev;
if (!heap->ops->allocate || !heap->ops->free)
pr_err("%s: can not add heap with invalid ops struct.\n",
heap->dev = dev;
down_write(&dev->lock);
+ heap->id = heap_id++;
/*
* use negative heap->id to reverse the priority -- when traversing
* the list later attempt higher id numbers first
}
EXPORT_SYMBOL(ion_device_add_heap);
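The negative-id comment above relies on ion keeping its heaps on a priority-sorted plist. A minimal sketch of the insertion, assuming the dev->heaps plist_head and heap->node plist_node fields already used by ion.c: plist orders entries by ascending priority, so storing -heap->id makes the most recently added (highest id) heap sort first and get tried first during allocation.

        /*
         * Sketch only, not part of the patch: insert the heap into the
         * device's priority list.  plist sorts ascending, so -heap->id puts
         * the newest (highest id) heap at the front of the list.
         */
        plist_node_init(&heap->node, -heap->id);
        plist_add(&heap->node, &dev->heaps);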
-struct ion_device *ion_device_create(void)
+int ion_device_create(void)
{
struct ion_device *idev;
int ret;
idev = kzalloc(sizeof(*idev), GFP_KERNEL);
if (!idev)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
idev->dev.minor = MISC_DYNAMIC_MINOR;
idev->dev.name = "ion";
if (ret) {
pr_err("ion: failed to register misc device.\n");
kfree(idev);
- return ERR_PTR(ret);
+ return ret;
}
idev->debug_root = debugfs_create_dir("ion", NULL);
pr_err("ion: failed to create debugfs clients directory.\n");
debugfs_done:
-
idev->buffers = RB_ROOT;
mutex_init(&idev->buffer_lock);
init_rwsem(&idev->lock);
idev->clients = RB_ROOT;
ion_root_client = &idev->clients;
mutex_init(&debugfs_mutex);
- return idev;
-}
-EXPORT_SYMBOL(ion_device_create);
-
-void ion_device_destroy(struct ion_device *dev)
-{
- misc_deregister(&dev->dev);
- debugfs_remove_recursive(dev->debug_root);
- /* XXX need to free the heaps and clients ? */
- kfree(dev);
+ internal_dev = idev;
+ return 0;
}
-EXPORT_SYMBOL(ion_device_destroy);
+subsys_initcall(ion_device_create);
*/
bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
-/**
- * ion_device_create - allocates and returns an ion device
- *
- * returns a valid device or -PTR_ERR
- */
-struct ion_device *ion_device_create(void);
-
-/**
- * ion_device_destroy - free and device and it's resource
- * @dev: the device
- */
-void ion_device_destroy(struct ion_device *dev);
-
/**
* ion_device_add_heap - adds a heap to the ion device
- * @dev: the device
* @heap: the heap to add
*/
-void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
+void ion_device_add_heap(struct ion_heap *heap);
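With the ion device now created internally at subsys_initcall time, a heap driver simply builds its heap and registers it from its own initcall. A hypothetical sketch of that flow (my_heap_create() and the "my_heap" name are invented for illustration; the real in-tree pattern is the ion_system_heap registration shown further below):

#include <linux/err.h>
#include <linux/init.h>
#include "ion.h"

static int my_heap_init(void)
{
        struct ion_heap *heap;

        /* assumed helper that allocates and fills in a struct ion_heap */
        heap = my_heap_create();
        if (IS_ERR(heap))
                return PTR_ERR(heap);
        heap->name = "my_heap";

        /* the ion device already exists by device_initcall time */
        ion_device_add_heap(heap);
        return 0;
}
device_initcall(my_heap_init);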
/**
* some helpers for common operations on buffers using the sg_table
size_t ion_heap_freelist_size(struct ion_heap *heap);
-/**
- * functions for creating and destroying the built in ion heaps.
- * architectures can add their own custom architecture specific
- * heaps as appropriate.
- */
-
-
-struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data);
-void ion_heap_destroy(struct ion_heap *heap);
-
-struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused);
-void ion_system_heap_destroy(struct ion_heap *heap);
-struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *heap);
-void ion_system_contig_heap_destroy(struct ion_heap *heap);
-
-struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data);
-void ion_carveout_heap_destroy(struct ion_heap *heap);
-
-struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data);
-void ion_chunk_heap_destroy(struct ion_heap *heap);
-
-struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data);
-void ion_cma_heap_destroy(struct ion_heap *heap);
-
/**
* functions for creating and destroying a heap pool -- allows you
* to keep a pool of pre allocated memory to use from your heap. Keeping
return &carveout_heap->heap;
}
-
-void ion_carveout_heap_destroy(struct ion_heap *heap)
-{
- struct ion_carveout_heap *carveout_heap =
- container_of(heap, struct ion_carveout_heap, heap);
-
- gen_pool_destroy(carveout_heap->pool);
- kfree(carveout_heap);
- carveout_heap = NULL;
-}
return ERR_PTR(ret);
}
-void ion_chunk_heap_destroy(struct ion_heap *heap)
-{
- struct ion_chunk_heap *chunk_heap =
- container_of(heap, struct ion_chunk_heap, heap);
-
- gen_pool_destroy(chunk_heap->pool);
- kfree(chunk_heap);
- chunk_heap = NULL;
-}
.unmap_kernel = ion_heap_unmap_kernel,
};
-struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
+static struct ion_heap *__ion_cma_heap_create(struct cma *cma)
{
struct ion_cma_heap *cma_heap;
* get device from private heaps data, later it will be
* used to make the link with reserved CMA memory
*/
- cma_heap->cma = data->priv;
+ cma_heap->cma = cma;
cma_heap->heap.type = ION_HEAP_TYPE_DMA;
return &cma_heap->heap;
}
-void ion_cma_heap_destroy(struct ion_heap *heap)
+static int __ion_add_cma_heaps(struct cma *cma, void *data)
{
- struct ion_cma_heap *cma_heap = to_cma_heap(heap);
+ struct ion_heap *heap;
+
+ heap = __ion_cma_heap_create(cma);
+ if (IS_ERR(heap))
+ return PTR_ERR(heap);
- kfree(cma_heap);
+ heap->name = cma_get_name(cma);
+
+ ion_device_add_heap(heap);
+ return 0;
+}
+
+static int ion_add_cma_heaps(void)
+{
+ cma_for_each_area(__ion_add_cma_heaps, NULL);
+ return 0;
}
+device_initcall(ion_add_cma_heaps);
heap->shrinker.batch = 0;
register_shrinker(&heap->shrinker);
}
-
-struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
-{
- struct ion_heap *heap = NULL;
-
- switch (heap_data->type) {
- case ION_HEAP_TYPE_SYSTEM_CONTIG:
- heap = ion_system_contig_heap_create(heap_data);
- break;
- case ION_HEAP_TYPE_SYSTEM:
- heap = ion_system_heap_create(heap_data);
- break;
- case ION_HEAP_TYPE_CARVEOUT:
- heap = ion_carveout_heap_create(heap_data);
- break;
- case ION_HEAP_TYPE_CHUNK:
- heap = ion_chunk_heap_create(heap_data);
- break;
-#ifdef CONFIG_ION_CMA_HEAP
- case ION_HEAP_TYPE_DMA:
- heap = ion_cma_heap_create(heap_data);
- break;
-#endif
- default:
- pr_err("%s: Invalid heap type %d\n", __func__,
- heap_data->type);
- return ERR_PTR(-EINVAL);
- }
-
- if (IS_ERR_OR_NULL(heap)) {
- pr_err("%s: error creating heap %s type %d base %pa size %zu\n",
- __func__, heap_data->name, heap_data->type,
- &heap_data->base, heap_data->size);
- return ERR_PTR(-EINVAL);
- }
-
- heap->name = heap_data->name;
- heap->id = heap_data->id;
- return heap;
-}
-EXPORT_SYMBOL(ion_heap_create);
-
-void ion_heap_destroy(struct ion_heap *heap)
-{
- if (!heap)
- return;
-
- switch (heap->type) {
- case ION_HEAP_TYPE_SYSTEM_CONTIG:
- ion_system_contig_heap_destroy(heap);
- break;
- case ION_HEAP_TYPE_SYSTEM:
- ion_system_heap_destroy(heap);
- break;
- case ION_HEAP_TYPE_CARVEOUT:
- ion_carveout_heap_destroy(heap);
- break;
- case ION_HEAP_TYPE_CHUNK:
- ion_chunk_heap_destroy(heap);
- break;
-#ifdef CONFIG_ION_CMA_HEAP
- case ION_HEAP_TYPE_DMA:
- ion_cma_heap_destroy(heap);
- break;
-#endif
- default:
- pr_err("%s: Invalid heap type %d\n", __func__,
- heap->type);
- }
-}
-EXPORT_SYMBOL(ion_heap_destroy);
return -ENOMEM;
}
-struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
+static struct ion_heap *__ion_system_heap_create(void)
{
struct ion_system_heap *heap;
return ERR_PTR(-ENOMEM);
}
-void ion_system_heap_destroy(struct ion_heap *heap)
+static int ion_system_heap_create(void)
{
- struct ion_system_heap *sys_heap = container_of(heap,
- struct ion_system_heap,
- heap);
- int i;
+ struct ion_heap *heap;
- for (i = 0; i < NUM_ORDERS; i++) {
- ion_page_pool_destroy(sys_heap->uncached_pools[i]);
- ion_page_pool_destroy(sys_heap->cached_pools[i]);
- }
- kfree(sys_heap);
+ heap = __ion_system_heap_create();
+ if (IS_ERR(heap))
+ return PTR_ERR(heap);
+ heap->name = "ion_system_heap";
+
+ ion_device_add_heap(heap);
+ return 0;
}
+device_initcall(ion_system_heap_create);
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
.map_user = ion_heap_map_user,
};
-struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
+static struct ion_heap *__ion_system_contig_heap_create(void)
{
struct ion_heap *heap;
return ERR_PTR(-ENOMEM);
heap->ops = &kmalloc_ops;
heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
+ heap->name = "ion_system_contig_heap";
return heap;
}
-void ion_system_contig_heap_destroy(struct ion_heap *heap)
+static int ion_system_contig_heap_create(void)
{
- kfree(heap);
+ struct ion_heap *heap;
+
+ heap = __ion_system_contig_heap_create();
+ if (IS_ERR(heap))
+ return PTR_ERR(heap);
+
+ ion_device_add_heap(heap);
+ return 0;
}
+device_initcall(ion_system_contig_heap_create);
+