buffer->heap = heap;
buffer->flags = flags;
- kref_init(&buffer->ref);
ret = heap->ops->allocate(heap, buffer, len, flags);
kfree(buffer);
}
-static void _ion_buffer_destroy(struct kref *kref)
+static void _ion_buffer_destroy(struct ion_buffer *buffer)
{
- struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
struct ion_heap *heap = buffer->heap;
struct ion_device *dev = buffer->dev;
ion_buffer_destroy(buffer);
}
-static void ion_buffer_get(struct ion_buffer *buffer)
-{
- kref_get(&buffer->ref);
-}
-
-static int ion_buffer_put(struct ion_buffer *buffer)
-{
- return kref_put(&buffer->ref, _ion_buffer_destroy);
-}
-
-static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
-{
- mutex_lock(&buffer->lock);
- buffer->handle_count++;
- mutex_unlock(&buffer->lock);
-}
-
-static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
-{
- /*
- * when a buffer is removed from a handle, if it is not in
- * any other handles, copy the taskcomm and the pid of the
- * process it's being removed from into the buffer. At this
- * point there will be no way to track what processes this buffer is
- * being used by, it only exists as a dma_buf file descriptor.
- * The taskcomm and pid can provide a debug hint as to where this fd
- * is in the system
- */
- mutex_lock(&buffer->lock);
- buffer->handle_count--;
- BUG_ON(buffer->handle_count < 0);
- if (!buffer->handle_count) {
- struct task_struct *task;
-
- task = current->group_leader;
- get_task_comm(buffer->task_comm, task);
- buffer->pid = task_pid_nr(task);
- }
- mutex_unlock(&buffer->lock);
-}
-
-static struct ion_handle *ion_handle_create(struct ion_client *client,
- struct ion_buffer *buffer)
-{
- struct ion_handle *handle;
-
- handle = kzalloc(sizeof(*handle), GFP_KERNEL);
- if (!handle)
- return ERR_PTR(-ENOMEM);
- kref_init(&handle->ref);
- RB_CLEAR_NODE(&handle->node);
- handle->client = client;
- ion_buffer_get(buffer);
- ion_buffer_add_to_handle(buffer);
- handle->buffer = buffer;
-
- return handle;
-}
-
-static void ion_handle_kmap_put(struct ion_handle *);
-
-static void ion_handle_destroy(struct kref *kref)
-{
- struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
- struct ion_client *client = handle->client;
- struct ion_buffer *buffer = handle->buffer;
-
- mutex_lock(&buffer->lock);
- while (handle->kmap_cnt)
- ion_handle_kmap_put(handle);
- mutex_unlock(&buffer->lock);
-
- idr_remove(&client->idr, handle->id);
- if (!RB_EMPTY_NODE(&handle->node))
- rb_erase(&handle->node, &client->handles);
-
- ion_buffer_remove_from_handle(buffer);
- ion_buffer_put(buffer);
-
- kfree(handle);
-}
-
-static void ion_handle_get(struct ion_handle *handle)
-{
- kref_get(&handle->ref);
-}
-
-int ion_handle_put_nolock(struct ion_handle *handle)
-{
- return kref_put(&handle->ref, ion_handle_destroy);
-}
-
-int ion_handle_put(struct ion_handle *handle)
-{
- struct ion_client *client = handle->client;
- int ret;
-
- mutex_lock(&client->lock);
- ret = ion_handle_put_nolock(handle);
- mutex_unlock(&client->lock);
-
- return ret;
-}
-
-struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
- int id)
-{
- struct ion_handle *handle;
-
- handle = idr_find(&client->idr, id);
- if (handle)
- ion_handle_get(handle);
-
- return handle ? handle : ERR_PTR(-EINVAL);
-}
-
-struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
- int id)
-{
- struct ion_handle *handle;
-
- mutex_lock(&client->lock);
- handle = ion_handle_get_by_id_nolock(client, id);
- mutex_unlock(&client->lock);
-
- return handle;
-}
-
-static bool ion_handle_validate(struct ion_client *client,
- struct ion_handle *handle)
-{
- WARN_ON(!mutex_is_locked(&client->lock));
- return idr_find(&client->idr, handle->id) == handle;
-}
-
-static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
-{
- int id;
- struct rb_node **p = &client->handles.rb_node;
- struct rb_node *parent = NULL;
- struct ion_handle *entry;
-
- id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
- if (id < 0)
- return id;
-
- handle->id = id;
-
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_handle, node);
-
- if (handle->buffer < entry->buffer)
- p = &(*p)->rb_left;
- else if (handle->buffer > entry->buffer)
- p = &(*p)->rb_right;
- else
- WARN(1, "%s: buffer already found.", __func__);
- }
-
- rb_link_node(&handle->node, parent, p);
- rb_insert_color(&handle->node, &client->handles);
-
- return 0;
-}
-
-struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
- unsigned int heap_id_mask,
- unsigned int flags)
-{
- struct ion_handle *handle;
- struct ion_device *dev = client->dev;
- struct ion_buffer *buffer = NULL;
- struct ion_heap *heap;
- int ret;
-
- pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
- len, heap_id_mask, flags);
- /*
- * traverse the list of heaps available in this system in priority
- * order. If the heap type is supported by the client, and matches the
- * request of the caller allocate from it. Repeat until allocate has
- * succeeded or all heaps have been tried
- */
- len = PAGE_ALIGN(len);
-
- if (!len)
- return ERR_PTR(-EINVAL);
-
- down_read(&dev->lock);
- plist_for_each_entry(heap, &dev->heaps, node) {
- /* if the caller didn't specify this heap id */
- if (!((1 << heap->id) & heap_id_mask))
- continue;
- buffer = ion_buffer_create(heap, dev, len, flags);
- if (!IS_ERR(buffer))
- break;
- }
- up_read(&dev->lock);
-
- if (buffer == NULL)
- return ERR_PTR(-ENODEV);
-
- if (IS_ERR(buffer))
- return ERR_CAST(buffer);
-
- handle = ion_handle_create(client, buffer);
-
- /*
- * ion_buffer_create will create a buffer with a ref_cnt of 1,
- * and ion_handle_create will take a second reference, drop one here
- */
- ion_buffer_put(buffer);
-
- if (IS_ERR(handle))
- return handle;
-
- mutex_lock(&client->lock);
- ret = ion_handle_add(client, handle);
- mutex_unlock(&client->lock);
- if (ret) {
- ion_handle_put(handle);
- handle = ERR_PTR(ret);
- }
-
- return handle;
-}
-EXPORT_SYMBOL(ion_alloc);
-
-void ion_free_nolock(struct ion_client *client,
- struct ion_handle *handle)
-{
- if (!ion_handle_validate(client, handle)) {
- WARN(1, "%s: invalid handle passed to free.\n", __func__);
- return;
- }
- ion_handle_put_nolock(handle);
-}
-
-void ion_free(struct ion_client *client, struct ion_handle *handle)
-{
- BUG_ON(client != handle->client);
-
- mutex_lock(&client->lock);
- ion_free_nolock(client, handle);
- mutex_unlock(&client->lock);
-}
-EXPORT_SYMBOL(ion_free);
-
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
void *vaddr;
}
}
-static void ion_handle_kmap_put(struct ion_handle *handle)
-{
- struct ion_buffer *buffer = handle->buffer;
-
- if (!handle->kmap_cnt) {
- WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
- return;
- }
- handle->kmap_cnt--;
- if (!handle->kmap_cnt)
- ion_buffer_kmap_put(buffer);
-}
-
-static struct mutex debugfs_mutex;
-static struct rb_root *ion_root_client;
-static int is_client_alive(struct ion_client *client)
-{
- struct rb_node *node;
- struct ion_client *tmp;
- struct ion_device *dev;
-
- node = ion_root_client->rb_node;
- dev = container_of(ion_root_client, struct ion_device, clients);
-
- down_read(&dev->lock);
- while (node) {
- tmp = rb_entry(node, struct ion_client, node);
- if (client < tmp) {
- node = node->rb_left;
- } else if (client > tmp) {
- node = node->rb_right;
- } else {
- up_read(&dev->lock);
- return 1;
- }
- }
-
- up_read(&dev->lock);
- return 0;
-}
-
-static int ion_debug_client_show(struct seq_file *s, void *unused)
-{
- struct ion_client *client = s->private;
- struct rb_node *n;
- size_t sizes[ION_NUM_HEAP_IDS] = {0};
- const char *names[ION_NUM_HEAP_IDS] = {NULL};
- int i;
-
- mutex_lock(&debugfs_mutex);
- if (!is_client_alive(client)) {
- seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
- client);
- mutex_unlock(&debugfs_mutex);
- return 0;
- }
-
- mutex_lock(&client->lock);
- for (n = rb_first(&client->handles); n; n = rb_next(n)) {
- struct ion_handle *handle = rb_entry(n, struct ion_handle,
- node);
- unsigned int id = handle->buffer->heap->id;
-
- if (!names[id])
- names[id] = handle->buffer->heap->name;
- sizes[id] += handle->buffer->size;
- }
- mutex_unlock(&client->lock);
- mutex_unlock(&debugfs_mutex);
-
- seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
- for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
- if (!names[i])
- continue;
- seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
- }
- return 0;
-}
-
-static int ion_debug_client_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ion_debug_client_show, inode->i_private);
-}
-
-static const struct file_operations debug_client_fops = {
- .open = ion_debug_client_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int ion_get_client_serial(const struct rb_root *root,
- const unsigned char *name)
-{
- int serial = -1;
- struct rb_node *node;
-
- for (node = rb_first(root); node; node = rb_next(node)) {
- struct ion_client *client = rb_entry(node, struct ion_client,
- node);
-
- if (strcmp(client->name, name))
- continue;
- serial = max(serial, client->display_serial);
- }
- return serial + 1;
-}
-
-struct ion_client *ion_client_create(struct ion_device *dev,
- const char *name)
-{
- struct ion_client *client;
- struct task_struct *task;
- struct rb_node **p;
- struct rb_node *parent = NULL;
- struct ion_client *entry;
- pid_t pid;
-
- if (!name) {
- pr_err("%s: Name cannot be null\n", __func__);
- return ERR_PTR(-EINVAL);
- }
-
- get_task_struct(current->group_leader);
- task_lock(current->group_leader);
- pid = task_pid_nr(current->group_leader);
- /*
- * don't bother to store task struct for kernel threads,
- * they can't be killed anyway
- */
- if (current->group_leader->flags & PF_KTHREAD) {
- put_task_struct(current->group_leader);
- task = NULL;
- } else {
- task = current->group_leader;
- }
- task_unlock(current->group_leader);
-
- client = kzalloc(sizeof(*client), GFP_KERNEL);
- if (!client)
- goto err_put_task_struct;
-
- client->dev = dev;
- client->handles = RB_ROOT;
- idr_init(&client->idr);
- mutex_init(&client->lock);
- client->task = task;
- client->pid = pid;
- client->name = kstrdup(name, GFP_KERNEL);
- if (!client->name)
- goto err_free_client;
-
- down_write(&dev->lock);
- client->display_serial = ion_get_client_serial(&dev->clients, name);
- client->display_name = kasprintf(
- GFP_KERNEL, "%s-%d", name, client->display_serial);
- if (!client->display_name) {
- up_write(&dev->lock);
- goto err_free_client_name;
- }
- p = &dev->clients.rb_node;
- while (*p) {
- parent = *p;
- entry = rb_entry(parent, struct ion_client, node);
-
- if (client < entry)
- p = &(*p)->rb_left;
- else if (client > entry)
- p = &(*p)->rb_right;
- }
- rb_link_node(&client->node, parent, p);
- rb_insert_color(&client->node, &dev->clients);
-
- client->debug_root = debugfs_create_file(client->display_name, 0664,
- dev->clients_debug_root,
- client, &debug_client_fops);
- if (!client->debug_root) {
- char buf[256], *path;
-
- path = dentry_path(dev->clients_debug_root, buf, 256);
- pr_err("Failed to create client debugfs at %s/%s\n",
- path, client->display_name);
- }
-
- up_write(&dev->lock);
-
- return client;
-
-err_free_client_name:
- kfree(client->name);
-err_free_client:
- kfree(client);
-err_put_task_struct:
- if (task)
- put_task_struct(current->group_leader);
- return ERR_PTR(-ENOMEM);
-}
-EXPORT_SYMBOL(ion_client_create);
-
-void ion_client_destroy(struct ion_client *client)
-{
- struct ion_device *dev = client->dev;
- struct rb_node *n;
-
- pr_debug("%s: %d\n", __func__, __LINE__);
- mutex_lock(&debugfs_mutex);
- while ((n = rb_first(&client->handles))) {
- struct ion_handle *handle = rb_entry(n, struct ion_handle,
- node);
- ion_handle_destroy(&handle->ref);
- }
-
- idr_destroy(&client->idr);
-
- down_write(&dev->lock);
- if (client->task)
- put_task_struct(client->task);
- rb_erase(&client->node, &dev->clients);
- debugfs_remove_recursive(client->debug_root);
- up_write(&dev->lock);
-
- kfree(client->display_name);
- kfree(client->name);
- kfree(client);
- mutex_unlock(&debugfs_mutex);
-}
-EXPORT_SYMBOL(ion_client_destroy);
-
static struct sg_table *dup_sg_table(struct sg_table *table)
{
struct sg_table *new_table;
{
struct ion_buffer *buffer = dmabuf->priv;
- ion_buffer_put(buffer);
+ _ion_buffer_destroy(buffer);
}
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
.kunmap = ion_dma_buf_kunmap,
};
-struct dma_buf *ion_share_dma_buf(struct ion_client *client,
- struct ion_handle *handle)
+int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
{
+ struct ion_device *dev = internal_dev;
+ struct ion_buffer *buffer = NULL;
+ struct ion_heap *heap;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
- struct ion_buffer *buffer;
+ int fd;
struct dma_buf *dmabuf;
- bool valid_handle;
- mutex_lock(&client->lock);
- valid_handle = ion_handle_validate(client, handle);
- if (!valid_handle) {
- WARN(1, "%s: invalid handle passed to share.\n", __func__);
- mutex_unlock(&client->lock);
- return ERR_PTR(-EINVAL);
+ pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
+ len, heap_id_mask, flags);
+ /*
+ * traverse the list of heaps available in this system in priority
+ * order. If the heap type is supported by the client, and matches the
+ * request of the caller allocate from it. Repeat until allocate has
+ * succeeded or all heaps have been tried
+ */
+ len = PAGE_ALIGN(len);
+
+ if (!len)
+ return -EINVAL;
+
+ down_read(&dev->lock);
+ plist_for_each_entry(heap, &dev->heaps, node) {
+ /* if the caller didn't specify this heap id */
+ if (!((1 << heap->id) & heap_id_mask))
+ continue;
+ buffer = ion_buffer_create(heap, dev, len, flags);
+ if (!IS_ERR(buffer))
+ break;
}
- buffer = handle->buffer;
- ion_buffer_get(buffer);
- mutex_unlock(&client->lock);
+ up_read(&dev->lock);
+
+ if (buffer == NULL)
+ return -ENODEV;
+
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
exp_info.ops = &dma_buf_ops;
exp_info.size = buffer->size;
dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(dmabuf)) {
- ion_buffer_put(buffer);
- return dmabuf;
- }
-
- return dmabuf;
-}
-EXPORT_SYMBOL(ion_share_dma_buf);
-
-int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
-{
- struct dma_buf *dmabuf;
- int fd;
-
- dmabuf = ion_share_dma_buf(client, handle);
- if (IS_ERR(dmabuf))
+ _ion_buffer_destroy(buffer);
return PTR_ERR(dmabuf);
+ }
 fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 if (fd < 0)
  dma_buf_put(dmabuf);

 return fd;
}
-EXPORT_SYMBOL(ion_share_dma_buf_fd);
-int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
+int ion_query_heaps(struct ion_heap_query *query)
{
- struct ion_device *dev = client->dev;
+ struct ion_device *dev = internal_dev;
struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
int ret = -EINVAL, cnt = 0, max_cnt;
struct ion_heap *heap;
return ret;
}
-static int ion_release(struct inode *inode, struct file *file)
-{
- struct ion_client *client = file->private_data;
-
- pr_debug("%s: %d\n", __func__, __LINE__);
- ion_client_destroy(client);
- return 0;
-}
-
-static int ion_open(struct inode *inode, struct file *file)
-{
- struct miscdevice *miscdev = file->private_data;
- struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
- struct ion_client *client;
- char debug_name[64];
-
- pr_debug("%s: %d\n", __func__, __LINE__);
- snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
- client = ion_client_create(dev, debug_name);
- if (IS_ERR(client))
- return PTR_ERR(client);
- file->private_data = client;
-
- return 0;
-}
-
static const struct file_operations ion_fops = {
.owner = THIS_MODULE,
- .open = ion_open,
- .release = ion_release,
.unlocked_ioctl = ion_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = ion_ioctl,
#endif
};
-static size_t ion_debug_heap_total(struct ion_client *client,
- unsigned int id)
-{
- size_t size = 0;
- struct rb_node *n;
-
- mutex_lock(&client->lock);
- for (n = rb_first(&client->handles); n; n = rb_next(n)) {
- struct ion_handle *handle = rb_entry(n,
- struct ion_handle,
- node);
- if (handle->buffer->heap->id == id)
- size += handle->buffer->size;
- }
- mutex_unlock(&client->lock);
- return size;
-}
-
-static int ion_debug_heap_show(struct seq_file *s, void *unused)
-{
- struct ion_heap *heap = s->private;
- struct ion_device *dev = heap->dev;
- struct rb_node *n;
- size_t total_size = 0;
- size_t total_orphaned_size = 0;
-
- seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
- seq_puts(s, "----------------------------------------------------\n");
-
- mutex_lock(&debugfs_mutex);
- for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
- struct ion_client *client = rb_entry(n, struct ion_client,
- node);
- size_t size = ion_debug_heap_total(client, heap->id);
-
- if (!size)
- continue;
- if (client->task) {
- char task_comm[TASK_COMM_LEN];
-
- get_task_comm(task_comm, client->task);
- seq_printf(s, "%16s %16u %16zu\n", task_comm,
- client->pid, size);
- } else {
- seq_printf(s, "%16s %16u %16zu\n", client->name,
- client->pid, size);
- }
- }
- mutex_unlock(&debugfs_mutex);
-
- seq_puts(s, "----------------------------------------------------\n");
- seq_puts(s, "orphaned allocations (info is from last known client):\n");
- mutex_lock(&dev->buffer_lock);
- for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
- struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
- node);
- if (buffer->heap->id != heap->id)
- continue;
- total_size += buffer->size;
- if (!buffer->handle_count) {
- seq_printf(s, "%16s %16u %16zu %d %d\n",
- buffer->task_comm, buffer->pid,
- buffer->size, buffer->kmap_cnt,
- kref_read(&buffer->ref));
- total_orphaned_size += buffer->size;
- }
- }
- mutex_unlock(&dev->buffer_lock);
- seq_puts(s, "----------------------------------------------------\n");
- seq_printf(s, "%16s %16zu\n", "total orphaned",
- total_orphaned_size);
- seq_printf(s, "%16s %16zu\n", "total ", total_size);
- if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
- seq_printf(s, "%16s %16zu\n", "deferred free",
- heap->free_list_size);
- seq_puts(s, "----------------------------------------------------\n");
-
- if (heap->debug_show)
- heap->debug_show(heap, s, unused);
-
- return 0;
-}
-
-static int ion_debug_heap_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ion_debug_heap_show, inode->i_private);
-}
-
-static const struct file_operations debug_heap_fops = {
- .open = ion_debug_heap_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
static int debug_shrink_set(void *data, u64 val)
{
struct ion_heap *heap = data;
*/
plist_node_init(&heap->node, -heap->id);
plist_add(&heap->node, &dev->heaps);
- debug_file = debugfs_create_file(heap->name, 0664,
- dev->heaps_debug_root, heap,
- &debug_heap_fops);
-
- if (!debug_file) {
- char buf[256], *path;
-
- path = dentry_path(dev->heaps_debug_root, buf, 256);
- pr_err("Failed to create heap debugfs at %s/%s\n",
- path, heap->name);
- }
if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
char debug_name[64];
snprintf(debug_name, 64, "%s_shrink", heap->name);
debug_file = debugfs_create_file(
- debug_name, 0644, dev->heaps_debug_root, heap,
+ debug_name, 0644, dev->debug_root, heap,
&debug_shrink_fops);
if (!debug_file) {
char buf[256], *path;
- path = dentry_path(dev->heaps_debug_root, buf, 256);
+ path = dentry_path(dev->debug_root, buf, 256);
pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
path, debug_name);
}
pr_err("ion: failed to create debugfs root directory.\n");
goto debugfs_done;
}
- idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
- if (!idev->heaps_debug_root) {
- pr_err("ion: failed to create debugfs heaps directory.\n");
- goto debugfs_done;
- }
- idev->clients_debug_root = debugfs_create_dir("clients",
- idev->debug_root);
- if (!idev->clients_debug_root)
- pr_err("ion: failed to create debugfs clients directory.\n");
debugfs_done:
idev->buffers = RB_ROOT;
mutex_init(&idev->buffer_lock);
init_rwsem(&idev->lock);
plist_head_init(&idev->heaps);
- idev->clients = RB_ROOT;
- ion_root_client = &idev->clients;
- mutex_init(&debugfs_mutex);
internal_dev = idev;
return 0;
}
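
With ion_client and ion_handle gone, userspace deals with /dev/ion purely in terms of dma-buf file descriptors: ION_IOC_ALLOC hands back a dma-buf fd, and the buffer lives exactly as long as that fd (and any mappings of it) do, with ion_dma_buf_release calling _ion_buffer_destroy() on the final put. The following is a minimal userspace sketch of the new flow, assuming the post-refactor uapi header (struct ion_allocation_data with len/heap_id_mask/flags/fd) is installed as <linux/ion.h>; the heap_id_mask of 1 << 0 is only a placeholder, a real caller would pick a heap via ION_IOC_HEAP_QUERY.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ion.h>		/* post-refactor ION uapi; install path is an assumption */

int main(void)
{
	struct ion_allocation_data alloc = {
		.len = 4096,
		.heap_id_mask = 1 << 0,	/* placeholder: use ION_IOC_HEAP_QUERY to choose a heap */
		.flags = 0,
	};
	int ion_fd = open("/dev/ion", O_RDWR);

	if (ion_fd < 0)
		return 1;

	/* No client/handle step any more: the ioctl fills in a dma-buf fd directly */
	if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0) {
		close(ion_fd);
		return 1;
	}

	printf("allocated dma-buf fd %u\n", alloc.fd);

	/* Closing the dma-buf fd is what ultimately triggers _ion_buffer_destroy() */
	close(alloc.fd);
	close(ion_fd);
	return 0;
}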