*/
#include <linux/device.h>
+#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>
+#include <linux/exynos_iovmm.h>
+#include <linux/exynos_ion.h>
+#include <linux/highmem.h>
#include "ion.h"
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+#define CREATE_TRACE_POINTS
#include "ion_priv.h"
#include "compat_ion.h"
struct dentry *debug_root;
struct dentry *heaps_debug_root;
struct dentry *clients_debug_root;
+
+#ifdef CONFIG_ION_EXYNOS_STAT_LOG
+ /* event log */
+ struct dentry *buffer_debug_file;
+ struct dentry *event_debug_file;
+ struct ion_eventlog eventlog[ION_EVENT_LOG_MAX];
+ atomic_t event_idx;
+#endif
};
/**
*/
struct ion_handle {
struct kref ref;
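+ /* references handed out to userspace via pass_to_user(); protected by client->lock */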
+ unsigned int user_ref_count;
struct ion_client *client;
struct ion_buffer *buffer;
struct rb_node node;
int id;
};
-bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
-{
- return (buffer->flags & ION_FLAG_CACHED) &&
- !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
-}
-
-bool ion_buffer_cached(struct ion_buffer *buffer)
-{
- return !!(buffer->flags & ION_FLAG_CACHED);
-}
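+/* the single global ION device, recorded in ion_device_create() for the debugfs code */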
+struct ion_device *g_idev;
static inline struct page *ion_buffer_page(struct page *page)
{
return (struct page *)((unsigned long)page & ~(1UL));
}
+void ion_debug_heap_usage_show(struct ion_heap *heap)
+{
+ struct scatterlist *sg;
+ struct sg_table *table;
+ struct rb_node *n;
+ struct page *page;
+ struct ion_device *dev = heap->dev;
+ int i;
+ ion_phys_addr_t paddr;
+
+ /* show the usage for only contiguous buffer */
+ if ((heap->type != ION_HEAP_TYPE_CARVEOUT)
+ && (heap->type != ION_HEAP_TYPE_DMA))
+ return;
+
+ pr_err("[HEAP %16s (id %4d) DETAIL USAGE]\n", heap->name, heap->id);
+
+ mutex_lock(&dev->buffer_lock);
+ for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
+ struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
+ node);
+ if (buffer->heap->id != heap->id)
+ continue;
+ table = buffer->sg_table;
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ page = sg_page(sg);
+ paddr = PFN_PHYS(page_to_pfn(page));
+ pr_err("[%16lx--%16lx] %16zu\n",
+ paddr, paddr + sg->length, buffer->size);
+ }
+ }
+ mutex_unlock(&dev->buffer_lock);
+}
+
+#ifdef CONFIG_ION_EXYNOS_STAT_LOG
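+/*
+ * Statistics logging: every alloc/free/mmap/shrink/clear stores an entry in
+ * dev->eventlog[], a fixed-size ring indexed by the atomically incremented
+ * dev->event_idx.  Old entries are overwritten once ION_EVENT_LOG_MAX events
+ * have been recorded; ion_debug_event_show() dumps the ring via debugfs.
+ */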
+static inline void ION_EVENT_ALLOC(struct ion_buffer *buffer, ktime_t begin)
+{
+ struct ion_device *dev = buffer->dev;
+ int idx = atomic_inc_return(&dev->event_idx);
+ struct ion_eventlog *log = &dev->eventlog[idx % ION_EVENT_LOG_MAX];
+ struct ion_event_alloc *data = &log->data.alloc;
+
+ log->type = ION_EVENT_TYPE_ALLOC;
+ log->begin = begin;
+ log->done = ktime_get();
+ data->id = buffer;
+ data->heap = buffer->heap;
+ data->size = buffer->size;
+ data->flags = buffer->flags;
+}
+
+static inline void ION_EVENT_FREE(struct ion_buffer *buffer, ktime_t begin)
+{
+ struct ion_device *dev = buffer->dev;
+ int idx = atomic_inc_return(&dev->event_idx) % ION_EVENT_LOG_MAX;
+ struct ion_eventlog *log = &dev->eventlog[idx];
+ struct ion_event_free *data = &log->data.free;
+
+ log->type = ION_EVENT_TYPE_FREE;
+ log->begin = begin;
+ log->done = ktime_get();
+ data->id = buffer;
+ data->heap = buffer->heap;
+ data->size = buffer->size;
+ data->shrinker = (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE);
+}
+
+static inline void ION_EVENT_MMAP(struct ion_buffer *buffer, ktime_t begin)
+{
+ struct ion_device *dev = buffer->dev;
+ int idx = atomic_inc_return(&dev->event_idx) % ION_EVENT_LOG_MAX;
+ struct ion_eventlog *log = &dev->eventlog[idx];
+ struct ion_event_mmap *data = &log->data.mmap;
+
+ log->type = ION_EVENT_TYPE_MMAP;
+ log->begin = begin;
+ log->done = ktime_get();
+ data->id = buffer;
+ data->heap = buffer->heap;
+ data->size = buffer->size;
+}
+
+void ION_EVENT_SHRINK(struct ion_device *dev, size_t size)
+{
+ int idx = atomic_inc_return(&dev->event_idx) % ION_EVENT_LOG_MAX;
+ struct ion_eventlog *log = &dev->eventlog[idx];
+
+ log->type = ION_EVENT_TYPE_SHRINK;
+ log->begin = ktime_get();
+ log->done = ktime_set(0, 0);
+ log->data.shrink.size = size;
+}
+
+void ION_EVENT_CLEAR(struct ion_buffer *buffer, ktime_t begin)
+{
+ struct ion_device *dev = buffer->dev;
+ int idx = atomic_inc_return(&dev->event_idx) % ION_EVENT_LOG_MAX;
+ struct ion_eventlog *log = &dev->eventlog[idx];
+ struct ion_event_clear *data = &log->data.clear;
+
+ log->type = ION_EVENT_TYPE_CLEAR;
+ log->begin = begin;
+ log->done = ktime_get();
+ data->id = buffer;
+ data->heap = buffer->heap;
+ data->size = buffer->size;
+ data->flags = buffer->flags;
+}
+
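+/*
+ * Track the devices ("masters") that attach to a buffer: each master gets a
+ * refcounted ion_task node on buffer->master_list, added and removed from the
+ * dma-buf map/unmap paths and reported through the "buffer" debugfs file.
+ */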
+static struct ion_task *ion_buffer_task_lookup(struct ion_buffer *buffer,
+ struct device *master)
+{
+ bool found = false;
+ struct ion_task *task;
+
+ list_for_each_entry(task, &buffer->master_list, list) {
+ if (task->master == master) {
+ found = true;
+ break;
+ }
+ }
+
+ return found ? task : NULL;
+}
+
+static void ion_buffer_set_task_info(struct ion_buffer *buffer)
+{
+ INIT_LIST_HEAD(&buffer->master_list);
+ get_task_comm(buffer->task_comm, current->group_leader);
+ get_task_comm(buffer->thread_comm, current);
+ buffer->pid = task_pid_nr(current->group_leader);
+ buffer->tid = task_pid_nr(current);
+}
+
+static void ion_buffer_task_add(struct ion_buffer *buffer,
+ struct device *master)
+{
+ struct ion_task *task;
+
+ task = ion_buffer_task_lookup(buffer, master);
+ if (!task) {
+ task = kzalloc(sizeof(*task), GFP_KERNEL);
+ if (task) {
+ task->master = master;
+ kref_init(&task->ref);
+ list_add_tail(&task->list, &buffer->master_list);
+ }
+ } else {
+ kref_get(&task->ref);
+ }
+}
+
+static void ion_buffer_task_add_lock(struct ion_buffer *buffer,
+ struct device *master)
+{
+ mutex_lock(&buffer->lock);
+ ion_buffer_task_add(buffer, master);
+ mutex_unlock(&buffer->lock);
+}
+
+static void __ion_buffer_task_remove(struct kref *kref)
+{
+ struct ion_task *task = container_of(kref, struct ion_task, ref);
+
+ list_del(&task->list);
+ kfree(task);
+}
+
+static void ion_buffer_task_remove(struct ion_buffer *buffer,
+ struct device *master)
+{
+ struct ion_task *task, *tmp;
+
+ list_for_each_entry_safe(task, tmp, &buffer->master_list, list) {
+ if (task->master == master) {
+ kref_put(&task->ref, __ion_buffer_task_remove);
+ break;
+ }
+ }
+}
+
+static void ion_buffer_task_remove_lock(struct ion_buffer *buffer,
+ struct device *master)
+{
+ mutex_lock(&buffer->lock);
+ ion_buffer_task_remove(buffer, master);
+ mutex_unlock(&buffer->lock);
+}
+
+static void ion_buffer_task_remove_all(struct ion_buffer *buffer)
+{
+ struct ion_task *task, *tmp;
+
+ mutex_lock(&buffer->lock);
+ list_for_each_entry_safe(task, tmp, &buffer->master_list, list) {
+ list_del(&task->list);
+ kfree(task);
+ }
+ mutex_unlock(&buffer->lock);
+}
+#else
+#define ION_EVENT_ALLOC(buffer, begin) do { } while (0)
+#define ION_EVENT_FREE(buffer, begin) do { } while (0)
+#define ION_EVENT_MMAP(buffer, begin) do { } while (0)
+#define ion_buffer_set_task_info(buffer) do { } while (0)
+#define ion_buffer_task_add(buffer, master) do { } while (0)
+#define ion_buffer_task_add_lock(buffer, master) do { } while (0)
+#define ion_buffer_task_remove(buffer, master) do { } while (0)
+#define ion_buffer_task_remove_lock(buffer, master) do { } while (0)
+#define ion_buffer_task_remove_all(buffer) do { } while (0)
+#endif
+
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
struct ion_buffer *buffer)
rb_link_node(&buffer->node, parent, p);
rb_insert_color(&buffer->node, &dev->buffers);
+
+ ion_buffer_set_task_info(buffer);
+ ion_buffer_task_add(buffer, dev->dev.this_device);
}
/* this function should only be called while dev->lock is held */
buffer->heap = heap;
buffer->flags = flags;
+ buffer->size = len;
kref_init(&buffer->ref);
ret = heap->ops->allocate(heap, buffer, len, align, flags);
}
buffer->dev = dev;
- buffer->size = len;
table = heap->ops->map_dma(heap, buffer);
if (WARN_ONCE(table == NULL,
buffer->dev = dev;
buffer->size = len;
INIT_LIST_HEAD(&buffer->vmas);
+ INIT_LIST_HEAD(&buffer->iovas);
mutex_init(&buffer->lock);
/*
* this will set up dma addresses for the sglist -- it is not
void ion_buffer_destroy(struct ion_buffer *buffer)
{
+ struct ion_iovm_map *iovm_map;
+ struct ion_iovm_map *tmp;
+
+ ION_EVENT_BEGIN();
+ trace_ion_free_start((unsigned long) buffer, buffer->size,
+ buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE);
+
if (WARN_ON(buffer->kmap_cnt > 0))
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+
+ list_for_each_entry_safe(iovm_map, tmp, &buffer->iovas, list) {
+ iovmm_unmap(iovm_map->dev, iovm_map->iova);
+ list_del(&iovm_map->list);
+ kfree(iovm_map);
+ }
+
buffer->heap->ops->unmap_dma(buffer->heap, buffer);
buffer->heap->ops->free(buffer);
vfree(buffer->pages);
+
+ ion_buffer_task_remove_all(buffer);
+ ION_EVENT_FREE(buffer, ION_EVENT_DONE());
+ trace_ion_free_end((unsigned long) buffer, buffer->size,
+ buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE);
kfree(buffer);
}
mutex_unlock(&buffer->lock);
}
+static bool ion_handle_validate(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ WARN_ON(!mutex_is_locked(&client->lock));
+ return idr_find(&client->idr, handle->id) == handle;
+}
+
static struct ion_handle *ion_handle_create(struct ion_client *client,
struct ion_buffer *buffer)
{
kref_get(&handle->ref);
}
+/* Must hold the client lock */
+static struct ion_handle *ion_handle_get_check_overflow(struct ion_handle *handle)
+{
+ if (atomic_read(&handle->ref.refcount) + 1 == 0)
+ return ERR_PTR(-EOVERFLOW);
+ ion_handle_get(handle);
+ return handle;
+}
+
static int ion_handle_put_nolock(struct ion_handle *handle)
+{
+ return kref_put(&handle->ref, ion_handle_destroy);
+}
+
+int ion_handle_put(struct ion_handle *handle)
{
int ret;
- ret = kref_put(&handle->ref, ion_handle_destroy);
+ mutex_lock(&handle->client->lock);
+ if (!ion_handle_validate(handle->client, handle)) {
+ WARN(1, "%s: invalid handle passed.\n", __func__);
+ mutex_unlock(&handle->client->lock);
+ return -EINVAL;
+ }
+
+ ret = ion_handle_put_nolock(handle);
+ mutex_unlock(&handle->client->lock);
return ret;
}
-int ion_handle_put(struct ion_handle *handle)
+/* Must hold the client lock */
+static void user_ion_handle_get(struct ion_handle *handle)
+{
+ if (handle->user_ref_count++ == 0)
+ kref_get(&handle->ref);
+}
+
+/* Must hold the client lock */
+static struct ion_handle *user_ion_handle_get_check_overflow(struct ion_handle *handle)
+{
+ if (handle->user_ref_count + 1 == 0)
+ return ERR_PTR(-EOVERFLOW);
+ user_ion_handle_get(handle);
+ return handle;
+}
+
+/*
+ * Pass a kref to the user ref count.  We know we are holding a kref to the
+ * object before and after this call, so the handle does not need to be
+ * revalidated.
+ */
+static struct ion_handle *pass_to_user(struct ion_handle *handle)
{
struct ion_client *client = handle->client;
- int ret;
+ struct ion_handle *ret;
mutex_lock(&client->lock);
- ret = ion_handle_put_nolock(handle);
+ ret = user_ion_handle_get_check_overflow(handle);
+ ion_handle_put_nolock(handle);
mutex_unlock(&client->lock);
+ return ret;
+}
+
+/* Must hold the client lock */
+static int user_ion_handle_put_nolock(struct ion_handle *handle)
+{
+ int ret = 0;
+
+ if (--handle->user_ref_count == 0)
+ ret = ion_handle_put_nolock(handle);
return ret;
}
handle = idr_find(&client->idr, id);
if (handle)
- ion_handle_get(handle);
+ return ion_handle_get_check_overflow(handle);
- return handle ? handle : ERR_PTR(-EINVAL);
+ return ERR_PTR(-EINVAL);
}
struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
return handle;
}
-static bool ion_handle_validate(struct ion_client *client,
- struct ion_handle *handle)
-{
- WARN_ON(!mutex_is_locked(&client->lock));
- return idr_find(&client->idr, handle->id) == handle;
-}
-
static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
int id;
struct ion_handle *entry;
id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
- if (id < 0)
+ if (id < 0) {
+ pr_err("%s: Fail to get bad id (ret %d)\n", __func__, id);
return id;
+ }
handle->id = id;
return 0;
}
-struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+unsigned int ion_parse_heap_id(unsigned int heap_id_mask, unsigned int flags);
+
+static size_t ion_buffer_get_total_size_by_pid(struct ion_client *client)
+{
+ struct ion_device *dev = client->dev;
+ pid_t pid = client->pid;
+ size_t pid_total_size = 0;
+ struct rb_node *n;
+
+ mutex_lock(&dev->buffer_lock);
+ for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
+ struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
+ node);
+ mutex_lock(&buffer->lock);
+ if (pid == buffer->pid)
+ pid_total_size += buffer->size;
+ mutex_unlock(&buffer->lock);
+ }
+ mutex_unlock(&dev->buffer_lock);
+
+ return pid_total_size;
+}
+
+static struct ion_handle *__ion_alloc(struct ion_client *client, size_t len,
size_t align, unsigned int heap_id_mask,
- unsigned int flags)
+ unsigned int flags, bool grab_handle)
{
struct ion_handle *handle;
struct ion_device *dev = client->dev;
struct ion_heap *heap;
int ret;
+ ION_EVENT_BEGIN();
+ trace_ion_alloc_start(client->name, 0, len, align, heap_id_mask, flags);
+
pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
len, align, heap_id_mask, flags);
/*
* succeeded or all heaps have been tried
*/
len = PAGE_ALIGN(len);
-
- if (!len)
+ if (WARN_ON(!len)) {
+ trace_ion_alloc_fail(client->name, EINVAL, len,
+ align, heap_id_mask, flags);
return ERR_PTR(-EINVAL);
+ }
+
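+ /*
+ * Sanity limit: a request larger than a quarter of system RAM is rejected
+ * if it would push the calling process' total ION footprint past half of
+ * system RAM.
+ */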
+ if (len / PAGE_SIZE > totalram_pages / 4) {
+ size_t pid_total_size = ion_buffer_get_total_size_by_pid(client);
+
+ if ((len + pid_total_size) / PAGE_SIZE > totalram_pages / 2) {
+ pr_err("%s: len %zu total %zu heap_id_mask %u flags %x\n",
+ __func__, len, pid_total_size, heap_id_mask, flags);
+ return ERR_PTR(-EINVAL);
+ }
+ }
down_read(&dev->lock);
+ heap_id_mask = ion_parse_heap_id(heap_id_mask, flags);
+ if (heap_id_mask == 0) {
+ up_read(&dev->lock);
+ return ERR_PTR(-EINVAL);
+ }
+
plist_for_each_entry(heap, &dev->heaps, node) {
/* if the caller didn't specify this heap id */
if (!((1 << heap->id) & heap_id_mask))
}
up_read(&dev->lock);
- if (buffer == NULL)
+ if (buffer == NULL) {
+ trace_ion_alloc_fail(client->name, ENODEV, len,
+ align, heap_id_mask, flags);
return ERR_PTR(-ENODEV);
+ }
- if (IS_ERR(buffer))
+ if (IS_ERR(buffer)) {
+ trace_ion_alloc_fail(client->name, PTR_ERR(buffer),
+ len, align, heap_id_mask, flags);
return ERR_CAST(buffer);
+ }
handle = ion_handle_create(client, buffer);
*/
ion_buffer_put(buffer);
- if (IS_ERR(handle))
+ if (IS_ERR(handle)) {
+ trace_ion_alloc_fail(client->name, (unsigned long) buffer,
+ len, align, heap_id_mask, flags);
return handle;
+ }
mutex_lock(&client->lock);
+ if (grab_handle)
+ ion_handle_get(handle);
ret = ion_handle_add(client, handle);
mutex_unlock(&client->lock);
if (ret) {
ion_handle_put(handle);
handle = ERR_PTR(ret);
+ trace_ion_alloc_fail(client->name, (unsigned long) buffer,
+ len, align, heap_id_mask, flags);
}
+ ION_EVENT_ALLOC(buffer, ION_EVENT_DONE());
+ trace_ion_alloc_end(client->name, (unsigned long) buffer,
+ len, align, heap_id_mask, flags);
+
return handle;
}
+
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+ size_t align, unsigned int heap_id_mask,
+ unsigned int flags)
+{
+ return __ion_alloc(client, len, align, heap_id_mask, flags, false);
+}
EXPORT_SYMBOL(ion_alloc);
static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
ion_handle_put_nolock(handle);
}
+static void user_ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
+{
+ bool valid_handle;
+
+ BUG_ON(client != handle->client);
+
+ valid_handle = ion_handle_validate(client, handle);
+ if (!valid_handle) {
+ WARN(1, "%s: invalid handle passed to free.\n", __func__);
+ return;
+ }
+ if (!(handle->user_ref_count > 0)) {
+ WARN(1, "%s: User does not have access!\n", __func__);
+ return;
+ }
+ user_ion_handle_put_nolock(handle);
+}
+
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
BUG_ON(client != handle->client);
return vaddr;
buffer->vaddr = vaddr;
buffer->kmap_cnt++;
+
return vaddr;
}
struct ion_client *client = s->private;
struct rb_node *n;
size_t sizes[ION_NUM_HEAP_IDS] = {0};
+ size_t sizes_pss[ION_NUM_HEAP_IDS] = {0};
const char *names[ION_NUM_HEAP_IDS] = {NULL};
int i;
+ down_read(&g_idev->lock);
+
+ /* check validity of the client */
+ for (n = rb_first(&g_idev->clients); n; n = rb_next(n)) {
+ struct ion_client *c = rb_entry(n, struct ion_client, node);
+ if (client == c)
+ break;
+ }
+
+ if (!n) {
+ pr_err("%s: invalid client %pK\n", __func__, client);
+ up_read(&g_idev->lock);
+ return -EINVAL;
+ }
+
+ seq_printf(s, "%16.s %4.s %16.s %4.s %10.s %8.s %9.s\n",
+ "task", "pid", "thread", "tid", "size", "# procs", "flag");
+ seq_printf(s, "----------------------------------------------"
+ "--------------------------------------------\n");
+
mutex_lock(&client->lock);
for (n = rb_first(&client->handles); n; n = rb_next(n)) {
struct ion_handle *handle = rb_entry(n, struct ion_handle,
node);
- unsigned int id = handle->buffer->heap->id;
+ struct ion_buffer *buffer = handle->buffer;
+ unsigned int id = buffer->heap->id;
if (!names[id])
- names[id] = handle->buffer->heap->name;
- sizes[id] += handle->buffer->size;
+ names[id] = buffer->heap->name;
+ sizes[id] += buffer->size;
+ sizes_pss[id] += (buffer->size / buffer->handle_count);
+ seq_printf(s, "%16.s %4u %16.s %4u %10zu %8d %9lx\n",
+ buffer->task_comm, buffer->pid,
+ buffer->thread_comm, buffer->tid, buffer->size,
+ buffer->handle_count, buffer->flags);
}
mutex_unlock(&client->lock);
+ up_read(&g_idev->lock);
- seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
+ seq_printf(s, "----------------------------------------------"
+ "--------------------------------------------\n");
+ seq_printf(s, "%16.16s: %16.16s %18.18s\n", "heap_name",
+ "size_in_bytes", "size_in_bytes(pss)");
for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
if (!names[i])
continue;
- seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
+ seq_printf(s, "%16.16s: %16zu %18zu\n",
+ names[i], sizes[i], sizes_pss[i]);
}
return 0;
}
struct rb_node *n;
pr_debug("%s: %d\n", __func__, __LINE__);
+
+ mutex_lock(&client->lock);
while ((n = rb_first(&client->handles))) {
struct ion_handle *handle = rb_entry(n, struct ion_handle,
node);
ion_handle_destroy(&handle->ref);
}
+ mutex_unlock(&client->lock);
idr_destroy(&client->idr);
down_write(&dev->lock);
struct ion_buffer *buffer = dmabuf->priv;
ion_buffer_sync_for_device(buffer, attachment->dev, direction);
+
+ ion_buffer_task_add_lock(buffer, attachment->dev);
+
return buffer->sg_table;
}
struct sg_table *table,
enum dma_data_direction direction)
{
+ ion_buffer_task_remove_lock(attachment->dmabuf->priv, attachment->dev);
}
void ion_pages_sync_for_device(struct device *dev, struct page *page,
int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
int i;
+ if (!ion_buffer_cached(buffer))
+ return;
+
pr_debug("%s: syncing for device %s\n", __func__,
dev ? dev_name(dev) : "null");
mutex_lock(&buffer->lock);
list_add(&vma_list->list, &buffer->vmas);
mutex_unlock(&buffer->lock);
- pr_debug("%s: adding %p\n", __func__, vma);
+ pr_debug("%s: adding %pK\n", __func__, vma);
}
static void ion_vm_close(struct vm_area_struct *vma)
continue;
list_del(&vma_list->list);
kfree(vma_list);
- pr_debug("%s: deleting %p\n", __func__, vma);
+ pr_debug("%s: deleting %pK\n", __func__, vma);
break;
}
mutex_unlock(&buffer->lock);
struct ion_buffer *buffer = dmabuf->priv;
int ret = 0;
+ ION_EVENT_BEGIN();
+
+ if (buffer->flags & ION_FLAG_NOZEROED) {
+ pr_err("%s: mmap non-zeroed buffer to user is prohibited!\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (buffer->flags & ION_FLAG_PROTECTED) {
+ pr_err("%s: mmap protected buffer to user is prohibited!\n",
+ __func__);
+ return -EPERM;
+ }
+
+ if (((vma->vm_pgoff << PAGE_SHIFT) >= buffer->size) ||
+ ((vma->vm_end - vma->vm_start) >
+ (buffer->size - (vma->vm_pgoff << PAGE_SHIFT)))) {
+ pr_err("%s: trying to map outside of buffer.\n", __func__);
+ return -EINVAL;
+ }
+
if (!buffer->heap->ops->map_user) {
pr_err("%s: this heap does not define a method for mapping to userspace\n",
__func__);
return -EINVAL;
}
+ trace_ion_mmap_start((unsigned long) buffer, buffer->size,
+ !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
+
if (ion_buffer_fault_user_mappings(buffer)) {
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
VM_DONTDUMP;
vma->vm_private_data = buffer;
vma->vm_ops = &ion_vma_ops;
ion_vm_open(vma);
+ ION_EVENT_MMAP(buffer, ION_EVENT_DONE());
+ trace_ion_mmap_end((unsigned long) buffer, buffer->size,
+ !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
return 0;
}
pr_err("%s: failure mapping buffer to userspace\n",
__func__);
+ ION_EVENT_MMAP(buffer, ION_EVENT_DONE());
+ trace_ion_mmap_end((unsigned long) buffer, buffer->size,
+ !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
+
return ret;
}
ion_buffer_put(buffer);
}
-static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
-{
- struct ion_buffer *buffer = dmabuf->priv;
-
- return buffer->vaddr + offset * PAGE_SIZE;
-}
-
-static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
- void *ptr)
-{
-}
-
-static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
- size_t len,
- enum dma_data_direction direction)
+static void *ion_dma_buf_vmap(struct dma_buf *dmabuf)
{
struct ion_buffer *buffer = dmabuf->priv;
void *vaddr;
if (!buffer->heap->ops->map_kernel) {
pr_err("%s: map kernel is not implemented by this heap.\n",
__func__);
- return -ENODEV;
+ return ERR_PTR(-ENODEV);
}
mutex_lock(&buffer->lock);
vaddr = ion_buffer_kmap_get(buffer);
mutex_unlock(&buffer->lock);
- return PTR_ERR_OR_ZERO(vaddr);
-}
+
+ return vaddr;
+}
+
+static void ion_dma_buf_vunmap(struct dma_buf *dmabuf, void *ptr)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ ion_buffer_kmap_put(buffer);
+ mutex_unlock(&buffer->lock);
+}
+
+static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ return buffer->vaddr + offset * PAGE_SIZE;
+}
+
+static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
+ void *ptr)
+{
+}
+
+static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
+ size_t len,
+ enum dma_data_direction direction)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ void *vaddr;
+
+ if (!buffer->heap->ops->map_kernel) {
+ pr_err("%s: map kernel is not implemented by this heap.\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ mutex_lock(&buffer->lock);
+ vaddr = ion_buffer_kmap_get(buffer);
+ mutex_unlock(&buffer->lock);
+ return PTR_ERR_OR_ZERO(vaddr);
+}
static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
size_t len,
mutex_unlock(&buffer->lock);
}
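+/*
+ * ION_PRIV_FLAG_NEED_TO_FLUSH is a per-buffer hint that other drivers can set
+ * and test through the exporter-specific set_privflag/get_privflag dma-buf
+ * ops added below.
+ */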
+static void ion_dma_buf_set_privflag(struct dma_buf *dmabuf)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ buffer->private_flags |= ION_PRIV_FLAG_NEED_TO_FLUSH;
+ mutex_unlock(&buffer->lock);
+}
+
+static bool ion_dma_buf_get_privflag(struct dma_buf *dmabuf, bool clear)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ bool ret;
+
+ mutex_lock(&buffer->lock);
+ ret = !!(buffer->private_flags & ION_PRIV_FLAG_NEED_TO_FLUSH);
+ if (clear)
+ buffer->private_flags &= ~ION_PRIV_FLAG_NEED_TO_FLUSH;
+ mutex_unlock(&buffer->lock);
+
+ return ret;
+}
+
static struct dma_buf_ops dma_buf_ops = {
.map_dma_buf = ion_map_dma_buf,
.unmap_dma_buf = ion_unmap_dma_buf,
.kunmap_atomic = ion_dma_buf_kunmap,
.kmap = ion_dma_buf_kmap,
.kunmap = ion_dma_buf_kunmap,
+ .vmap = ion_dma_buf_vmap,
+ .vunmap = ion_dma_buf_vunmap,
+ .set_privflag = ion_dma_buf_set_privflag,
+ .get_privflag = ion_dma_buf_get_privflag,
};
struct dma_buf *ion_share_dma_buf(struct ion_client *client,
/* if a handle exists for this buffer just take a reference to it */
handle = ion_handle_lookup(client, buffer);
if (!IS_ERR(handle)) {
- ion_handle_get(handle);
+ handle = ion_handle_get_check_overflow(handle);
mutex_unlock(&client->lock);
goto end;
}
}
EXPORT_SYMBOL(ion_import_dma_buf);
+int ion_cached_needsync_dmabuf(struct dma_buf *dmabuf)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ unsigned long cacheflag = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;
+
+ if (dmabuf->ops != &dma_buf_ops)
+ return -EINVAL;
+
+ return ((buffer->flags & cacheflag) == cacheflag) ? 1 : 0;
+}
+EXPORT_SYMBOL(ion_cached_needsync_dmabuf);
+
+bool ion_may_hwrender_dmabuf(struct dma_buf *dmabuf)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ if (dmabuf->ops != &dma_buf_ops) {
+ WARN(1, "%s: given dmabuf is not exported by ION\n", __func__);
+ return false;
+ }
+
+ return !!(buffer->flags & ION_FLAG_MAY_HWRENDER);
+}
+EXPORT_SYMBOL(ion_may_hwrender_dmabuf);
+
+bool ion_may_hwrender_handle(struct ion_client *client, struct ion_handle *handle)
+{
+ struct ion_buffer *buffer = handle->buffer;
+ bool valid_handle;
+
+ mutex_lock(&client->lock);
+ valid_handle = ion_handle_validate(client, handle);
+
+ if (!valid_handle) {
+ WARN(1, "%s: invalid handle passed\n", __func__);
+ mutex_unlock(&client->lock);
+ return false;
+ }
+ mutex_unlock(&client->lock);
+
+ return !!(buffer->flags & ION_FLAG_MAY_HWRENDER);
+}
+EXPORT_SYMBOL(ion_may_hwrender_handle);
+
static int ion_sync_for_device(struct ion_client *client, int fd)
{
struct dma_buf *dmabuf;
struct ion_buffer *buffer;
+ struct scatterlist *sg, *sgl;
+ int nelems;
+ void *vaddr;
+ int i = 0;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ /* if this memory came from ion */
+ if (dmabuf->ops != &dma_buf_ops) {
+ pr_err("%s: can not sync dmabuf from another exporter\n",
+ __func__);
+ dma_buf_put(dmabuf);
+ return -EINVAL;
+ }
+ buffer = dmabuf->priv;
+
+ if (!ion_buffer_cached(buffer) ||
+ ion_buffer_fault_user_mappings(buffer)) {
+ dma_buf_put(dmabuf);
+ return 0;
+ }
+
+ trace_ion_sync_start(_RET_IP_, buffer->dev->dev.this_device,
+ DMA_BIDIRECTIONAL, buffer->size,
+ buffer->vaddr, 0, false);
+
+ sgl = buffer->sg_table->sgl;
+ nelems = buffer->sg_table->nents;
+
+ for_each_sg(sgl, sg, nelems, i) {
+ vaddr = phys_to_virt(sg_phys(sg));
+ __dma_flush_range(vaddr, vaddr + sg->length);
+ }
+
+ trace_ion_sync_end(_RET_IP_, buffer->dev->dev.this_device,
+ DMA_BIDIRECTIONAL, buffer->size,
+ buffer->vaddr, 0, false);
+
+ dma_buf_put(dmabuf);
+ return 0;
+}
+
+static int ion_sync_partial_for_device(struct ion_client *client, int fd,
+ off_t offset, size_t len)
+{
+ struct dma_buf *dmabuf;
+ struct ion_buffer *buffer;
+ struct scatterlist *sg, *sgl;
+ size_t remained = len;
+ int nelems;
+ int i;
dmabuf = dma_buf_get(fd);
if (IS_ERR(dmabuf))
}
buffer = dmabuf->priv;
- dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
- buffer->sg_table->nents, DMA_BIDIRECTIONAL);
+ if (!ion_buffer_cached(buffer) ||
+ ion_buffer_fault_user_mappings(buffer)) {
+ dma_buf_put(dmabuf);
+ return 0;
+ }
+
+ trace_ion_sync_start(_RET_IP_, buffer->dev->dev.this_device,
+ DMA_BIDIRECTIONAL, buffer->size,
+ buffer->vaddr, 0, false);
+
+ sgl = buffer->sg_table->sgl;
+ nelems = buffer->sg_table->nents;
+
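+ /*
+ * Walk the scatterlist, skipping whole entries until 'offset' is
+ * consumed, then clean (DMA_TO_DEVICE) up to 'len' bytes spanning the
+ * remaining entries.
+ */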
+ for_each_sg(sgl, sg, nelems, i) {
+ size_t len_to_flush;
+ if (offset >= sg->length) {
+ offset -= sg->length;
+ continue;
+ }
+
+ len_to_flush = sg->length - offset;
+ if (remained < len_to_flush) {
+ len_to_flush = remained;
+ remained = 0;
+ } else {
+ remained -= len_to_flush;
+ }
+
+ __dma_map_area(phys_to_virt(sg_phys(sg)) + offset,
+ len_to_flush, DMA_TO_DEVICE);
+
+ if (remained == 0)
+ break;
+ offset = 0;
+ }
+
+ trace_ion_sync_end(_RET_IP_, buffer->dev->dev.this_device,
+ DMA_BIDIRECTIONAL, buffer->size,
+ buffer->vaddr, 0, false);
+
dma_buf_put(dmabuf);
+
return 0;
}
{
switch (cmd) {
case ION_IOC_SYNC:
+ case ION_IOC_SYNC_PARTIAL:
case ION_IOC_FREE:
case ION_IOC_CUSTOM:
return _IOC_WRITE;
union {
struct ion_fd_data fd;
+ struct ion_fd_partial_data fd_partial;
struct ion_allocation_data allocation;
struct ion_handle_data handle;
struct ion_custom_data custom;
{
struct ion_handle *handle;
- handle = ion_alloc(client, data.allocation.len,
+ handle = __ion_alloc(client, data.allocation.len,
data.allocation.align,
data.allocation.heap_id_mask,
- data.allocation.flags);
- if (IS_ERR(handle))
+ data.allocation.flags, true);
+ if (IS_ERR(handle)) {
+ pr_err("%s: len %zu align %zu heap_id_mask %u flags %x (ret %ld)\n",
+ __func__, data.allocation.len,
+ data.allocation.align,
+ data.allocation.heap_id_mask,
+ data.allocation.flags, PTR_ERR(handle));
return PTR_ERR(handle);
-
+ }
+ pass_to_user(handle);
data.allocation.handle = handle->id;
cleanup_handle = handle;
mutex_unlock(&client->lock);
return PTR_ERR(handle);
}
- ion_free_nolock(client, handle);
+ user_ion_free_nolock(client, handle);
ion_handle_put_nolock(handle);
mutex_unlock(&client->lock);
break;
struct ion_handle *handle;
handle = ion_import_dma_buf(client, data.fd.fd);
- if (IS_ERR(handle))
+ if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
- else
- data.handle.handle = handle->id;
+ } else {
+ handle = pass_to_user(handle);
+ if (IS_ERR(handle))
+ ret = PTR_ERR(handle);
+ else
+ data.handle.handle = handle->id;
+ }
break;
}
case ION_IOC_SYNC:
ret = ion_sync_for_device(client, data.fd.fd);
break;
}
+ case ION_IOC_SYNC_PARTIAL:
+ {
+ ret = ion_sync_partial_for_device(client, data.fd_partial.fd,
+ data.fd_partial.offset, data.fd_partial.len);
+ break;
+ }
case ION_IOC_CUSTOM:
{
if (!dev->custom_ioctl)
if (dir & _IOC_READ) {
if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
- if (cleanup_handle)
- ion_free(client, cleanup_handle);
+ if (cleanup_handle) {
+ mutex_lock(&client->lock);
+ user_ion_free_nolock(client, cleanup_handle);
+ ion_handle_put_nolock(cleanup_handle);
+ mutex_unlock(&client->lock);
+ }
return -EFAULT;
}
}
+ if (cleanup_handle)
+ ion_handle_put(cleanup_handle);
return ret;
}
seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
seq_puts(s, "----------------------------------------------------\n");
+ down_read(&dev->lock);
+
for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
struct ion_client *client = rb_entry(n, struct ion_client,
node);
if (heap->debug_show)
heap->debug_show(heap, s, unused);
+ up_read(&dev->lock);
+
return 0;
}
}
EXPORT_SYMBOL(ion_device_add_heap);
+#ifdef CONFIG_ION_EXYNOS_STAT_LOG
+
+#define MAX_DUMP_TASKS 8
+#define MAX_DUMP_NAME_LEN 32
+#define MAX_DUMP_BUFF_LEN 512
+
+static void ion_buffer_dump_flags(struct seq_file *s, unsigned long flags)
+{
+ if ((flags & ION_FLAG_CACHED) && !(flags & ION_FLAG_CACHED_NEEDS_SYNC))
+ seq_printf(s, "cached|faultmap");
+ else if (flags & ION_FLAG_CACHED)
+ seq_printf(s, "cached|needsync");
+ else
+ seq_printf(s, "noncached");
+
+ if (flags & ION_FLAG_NOZEROED)
+ seq_printf(s, "|nozeroed");
+
+ if (flags & ION_FLAG_PROTECTED)
+ seq_printf(s, "|protected");
+}
+
+static void ion_buffer_dump_tasks(struct ion_buffer *buffer, char *str)
+{
+ struct ion_task *task, *tmp;
+ const char *delim = "|";
+ size_t total_len = 0;
+ int count = 0;
+
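+ /*
+ * Build a '|'-separated list of the masters attached to this buffer.
+ * Entries named "ion" are skipped; the remaining device names have a
+ * fixed 9-character prefix stripped before being appended (assumes
+ * platform device names carry such a prefix).
+ */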
+ list_for_each_entry_safe(task, tmp, &buffer->master_list, list) {
+ const char *name;
+ size_t len = strlen(dev_name(task->master));
+
+ if (len > MAX_DUMP_NAME_LEN)
+ len = MAX_DUMP_NAME_LEN;
+ if (!strncmp(dev_name(task->master), "ion", len)) {
+ continue;
+ } else {
+ name = dev_name(task->master) + 9;
+ len -= 9;
+ }
+ if (total_len + len + 1 > MAX_DUMP_BUFF_LEN)
+ break;
+
+ strncat((char *)(str + total_len), name, len);
+ total_len += len;
+ if (!list_is_last(&task->list, &buffer->master_list))
+ str[total_len++] = *delim;
+
+ if (++count > MAX_DUMP_TASKS)
+ break;
+ }
+}
+
+static int ion_debug_buffer_show(struct seq_file *s, void *unused)
+{
+ struct ion_device *dev = s->private;
+ struct rb_node *n;
+ char *master_name;
+ size_t total_size = 0;
+
+ master_name = kzalloc(MAX_DUMP_BUFF_LEN, GFP_KERNEL);
+ if (!master_name) {
+ pr_err("%s: no memory for client string buffer\n", __func__);
+ return -ENOMEM;
+ }
+
+ seq_printf(s, "%20.s %16.s %4.s %16.s %4.s %10.s %4.s %3.s %6.s "
+ "%24.s %9.s\n",
+ "heap", "task", "pid", "thread", "tid",
+ "size", "kmap", "ref", "handle",
+ "master", "flag");
+ seq_printf(s, "------------------------------------------"
+ "----------------------------------------"
+ "----------------------------------------"
+ "--------------------------------------\n");
+
+ mutex_lock(&dev->buffer_lock);
+ for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
+ struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
+ node);
+ mutex_lock(&buffer->lock);
+ ion_buffer_dump_tasks(buffer, master_name);
+ total_size += buffer->size;
+ seq_printf(s, "%20.s %16.s %4u %16.s %4u %10zu %4d %3d %6d "
+ "%24.s %9lx", buffer->heap->name,
+ buffer->task_comm, buffer->pid,
+ buffer->thread_comm,
+ buffer->tid, buffer->size, buffer->kmap_cnt,
+ atomic_read(&buffer->ref.refcount),
+ buffer->handle_count, master_name,
+ buffer->flags);
+ seq_printf(s, "(");
+ ion_buffer_dump_flags(s, buffer->flags);
+ seq_printf(s, ")\n");
+ mutex_unlock(&buffer->lock);
+
+ memset(master_name, 0, MAX_DUMP_BUFF_LEN);
+ }
+ mutex_unlock(&dev->buffer_lock);
+
+ seq_printf(s, "------------------------------------------"
+ "----------------------------------------"
+ "----------------------------------------"
+ "--------------------------------------\n");
+ seq_printf(s, "%16.s %16zu\n", "total ", total_size);
+ seq_printf(s, "------------------------------------------"
+ "----------------------------------------"
+ "----------------------------------------"
+ "--------------------------------------\n");
+
+ kfree(master_name);
+
+ return 0;
+}
+
+static int ion_debug_buffer_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ion_debug_buffer_show, inode->i_private);
+}
+
+static const struct file_operations debug_buffer_fops = {
+ .open = ion_debug_buffer_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void ion_debug_event_show_one(struct seq_file *s,
+ struct ion_eventlog *log)
+{
+ struct timeval tv = ktime_to_timeval(log->begin);
+ long elapsed = ktime_us_delta(log->done, log->begin);
+
+ if (elapsed == 0)
+ return;
+
+ seq_printf(s, "[%06ld.%06ld] ", tv.tv_sec, tv.tv_usec);
+
+ switch (log->type) {
+ case ION_EVENT_TYPE_ALLOC:
+ {
+ struct ion_event_alloc *data = &log->data.alloc;
+ seq_printf(s, "%8s %pK %18s %11zd ", "alloc",
+ data->id, data->heap->name, data->size);
+ break;
+ }
+ case ION_EVENT_TYPE_FREE:
+ {
+ struct ion_event_free *data = &log->data.free;
+ seq_printf(s, "%8s %pK %18s %11zd ", "free",
+ data->id, data->heap->name, data->size);
+ break;
+ }
+ case ION_EVENT_TYPE_MMAP:
+ {
+ struct ion_event_mmap *data = &log->data.mmap;
+ seq_printf(s, "%8s %pK %18s %11zd ", "mmap",
+ data->id, data->heap->name, data->size);
+ break;
+ }
+ case ION_EVENT_TYPE_SHRINK:
+ {
+ struct ion_event_shrink *data = &log->data.shrink;
+ seq_printf(s, "%8s %16lx %18s %11zd ", "shrink",
+ 0l, "ion_noncontig_heap", data->size);
+ elapsed = 0;
+ break;
+ }
+ case ION_EVENT_TYPE_CLEAR:
+ {
+ struct ion_event_clear *data = &log->data.clear;
+ seq_printf(s, "%8s %pK %18s %11zd ", "clear",
+ data->id, data->heap->name, data->size);
+ break;
+ }
+ }
+
+ seq_printf(s, "%9ld", elapsed);
+
+ if (elapsed > 100 * USEC_PER_MSEC)
+ seq_printf(s, " *");
+
+ if (log->type == ION_EVENT_TYPE_ALLOC) {
+ seq_printf(s, " ");
+ ion_buffer_dump_flags(s, log->data.alloc.flags);
+ } else if (log->type == ION_EVENT_TYPE_CLEAR) {
+ seq_printf(s, " ");
+ ion_buffer_dump_flags(s, log->data.clear.flags);
+ }
+
+ if (log->type == ION_EVENT_TYPE_FREE && log->data.free.shrinker)
+ seq_printf(s, " shrinker");
+
+ seq_printf(s, "\n");
+}
+
+static int ion_debug_event_show(struct seq_file *s, void *unused)
+{
+ struct ion_device *dev = s->private;
+ int index = atomic_read(&dev->event_idx) % ION_EVENT_LOG_MAX;
+ int last = index;
+
+ seq_printf(s, "%13s %10s %8s %18s %11s %10s %24s\n", "timestamp",
+ "type", "id", "heap", "size", "time (us)", "remarks");
+ seq_printf(s, "-------------------------------------------");
+ seq_printf(s, "-------------------------------------------");
+ seq_printf(s, "-----------------------------------------\n");
+
+ /* nothing has been logged yet: event_idx still holds its initial -1 */
+ if (last < 0)
+ return 0;
+
+ /* dump oldest-first, starting just after the most recent entry */
+ do {
+ if (++index >= ION_EVENT_LOG_MAX)
+ index = 0;
+ ion_debug_event_show_one(s, &dev->eventlog[index]);
+ } while (index != last);
+
+ return 0;
+}
+
+static int ion_debug_event_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ion_debug_event_show, inode->i_private);
+}
+
+static const struct file_operations debug_event_fops = {
+ .open = ion_debug_event_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
struct ion_device *ion_device_create(long (*custom_ioctl)
(struct ion_client *client,
unsigned int cmd,
}
idev->clients_debug_root = debugfs_create_dir("clients",
idev->debug_root);
- if (!idev->clients_debug_root)
+ if (!idev->clients_debug_root) {
pr_err("ion: failed to create debugfs clients directory.\n");
+ goto debugfs_done;
+ }
+
+#ifdef CONFIG_ION_EXYNOS_STAT_LOG
+ atomic_set(&idev->event_idx, -1);
+ idev->buffer_debug_file = debugfs_create_file("buffer", 0444,
+ idev->debug_root, idev,
+ &debug_buffer_fops);
+ if (!idev->buffer_debug_file) {
+ pr_err("%s: failed to create buffer debug file\n", __func__);
+ goto debugfs_done;
+ }
+
+ idev->event_debug_file = debugfs_create_file("event", 0444,
+ idev->debug_root, idev,
+ &debug_event_fops);
+ if (!idev->event_debug_file)
+ pr_err("%s: failed to create event debug file\n", __func__);
+#endif
debugfs_done:
init_rwsem(&idev->lock);
plist_head_init(&idev->heaps);
idev->clients = RB_ROOT;
+
+ /* backup of ion device: assumes there is only one ion device */
+ g_idev = idev;
+
return idev;
}
EXPORT_SYMBOL(ion_device_create);
{
misc_deregister(&dev->dev);
debugfs_remove_recursive(dev->debug_root);
- /* XXX need to free the heaps and clients ? */
kfree(dev);
}
EXPORT_SYMBOL(ion_device_destroy);
data->heaps[i].size);
}
}
+
+/* must be called with buffer->lock held */
+static struct ion_iovm_map *ion_buffer_iova_create(struct ion_buffer *buffer,
+ struct device *dev, enum dma_data_direction dir, int prop)
+{
+ struct ion_iovm_map *iovm_map;
+ int ret = 0;
+
+ iovm_map = kzalloc(sizeof(struct ion_iovm_map), GFP_KERNEL);
+ if (!iovm_map) {
+ pr_err("%s: Failed to allocate ion_iovm_map for %s\n",
+ __func__, dev_name(dev));
+ return ERR_PTR(-ENOMEM);
+ }
+
+ iovm_map->iova = iovmm_map(dev, buffer->sg_table->sgl,
+ 0, buffer->size, dir, prop);
+
+ if (iovm_map->iova == (dma_addr_t)-ENOSYS) {
+ size_t len;
+ ion_phys_addr_t addr;
+
+ BUG_ON(!buffer->heap->ops->phys);
+ ret = buffer->heap->ops->phys(buffer->heap, buffer,
+ &addr, &len);
+ if (ret)
+ pr_err("%s: Unable to get PA for %s\n",
+ __func__, dev_name(dev));
+ } else if (IS_ERR_VALUE(iovm_map->iova)) {
+ ret = iovm_map->iova;
+ pr_err("%s: Unable to allocate IOVA for %s\n",
+ __func__, dev_name(dev));
+ }
+
+ if (ret) {
+ kfree(iovm_map);
+ return ERR_PTR(ret);
+ }
+
+ iovm_map->dev = dev;
+ iovm_map->domain = get_domain_from_dev(dev);
+ iovm_map->map_cnt = 1;
+
+ pr_debug("%s: new map added for dev %s, iova %pa, prop %d\n", __func__,
+ dev_name(dev), &iovm_map->iova, prop);
+
+ return iovm_map;
+}
+
+dma_addr_t ion_iovmm_map(struct dma_buf_attachment *attachment,
+ off_t offset, size_t size,
+ enum dma_data_direction direction, int prop)
+{
+ struct dma_buf *dmabuf = attachment->dmabuf;
+ struct ion_buffer *buffer = dmabuf->priv;
+ struct ion_iovm_map *iovm_map;
+ struct iommu_domain *domain;
+
+ BUG_ON(dmabuf->ops != &dma_buf_ops);
+
+ if (IS_ENABLED(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION) &&
+ buffer->flags & ION_FLAG_PROTECTED) {
+ struct ion_buffer_info *info = buffer->priv_virt;
+
+ if (info->prot_desc.dma_addr)
+ return info->prot_desc.dma_addr;
+ pr_err("%s: protected buffer but no secure iova\n", __func__);
+ return -EINVAL;
+ }
+
+ domain = get_domain_from_dev(attachment->dev);
+ if (!domain) {
+ pr_err("%s: invalid iommu device\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&buffer->lock);
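+ /* reuse an existing mapping if the buffer is already mapped in this IOMMU domain */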
+ list_for_each_entry(iovm_map, &buffer->iovas, list) {
+ if (domain == iovm_map->domain) {
+ iovm_map->map_cnt++;
+ mutex_unlock(&buffer->lock);
+ return iovm_map->iova;
+ }
+ }
+
+ if (!ion_buffer_cached(buffer))
+ prop &= ~IOMMU_CACHE;
+
+ iovm_map = ion_buffer_iova_create(buffer, attachment->dev,
+ direction, prop);
+ if (IS_ERR(iovm_map)) {
+ mutex_unlock(&buffer->lock);
+ return PTR_ERR(iovm_map);
+ }
+
+ list_add_tail(&iovm_map->list, &buffer->iovas);
+ mutex_unlock(&buffer->lock);
+
+ return iovm_map->iova;
+}
+
+void ion_iovmm_unmap(struct dma_buf_attachment *attachment, dma_addr_t iova)
+{
+ struct ion_iovm_map *iovm_map;
+ struct dma_buf *dmabuf = attachment->dmabuf;
+ struct device *dev = attachment->dev;
+ struct ion_buffer *buffer = attachment->dmabuf->priv;
+ struct iommu_domain *domain;
+
+ BUG_ON(dmabuf->ops != &dma_buf_ops);
+
+ if (IS_ENABLED(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION) &&
+ buffer->flags & ION_FLAG_PROTECTED)
+ return;
+
+ domain = get_domain_from_dev(attachment->dev);
+ if (!domain) {
+ pr_err("%s: invalid iommu device\n", __func__);
+ return;
+ }
+
+ mutex_lock(&buffer->lock);
+ list_for_each_entry(iovm_map, &buffer->iovas, list) {
+ if ((domain == iovm_map->domain) && (iova == iovm_map->iova)) {
+ if (--iovm_map->map_cnt == 0) {
+ list_del(&iovm_map->list);
+ pr_debug("%s: unmap previous %pa for dev %s\n",
+ __func__, &iovm_map->iova,
+ dev_name(iovm_map->dev));
+ iovmm_unmap(iovm_map->dev, iovm_map->iova);
+ kfree(iovm_map);
+ }
+
+ mutex_unlock(&buffer->lock);
+ return;
+ }
+ }
+
+ mutex_unlock(&buffer->lock);
+
+ WARN(1, "IOVA %pa is not found for %s\n", &iova, dev_name(dev));
+}