#include "ion.h"
#include "ion_priv.h"
-#define DEBUG
/**
* struct ion_device - the metadata of the ion device node
rb_insert_color(&buffer->node, &dev->buffers);
}
+static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);
+
/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
struct ion_device *dev,
buffer->dev = dev;
buffer->size = len;
+ buffer->flags = flags;
- table = buffer->heap->ops->map_dma(buffer->heap, buffer);
+ table = heap->ops->map_dma(heap, buffer);
if (IS_ERR_OR_NULL(table)) {
heap->ops->free(buffer);
kfree(buffer);
return ERR_PTR(PTR_ERR(table));
}
buffer->sg_table = table;
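+ /*
+ * The dirty bitmap allocated below tracks one bit per sg entry and
+ * the fault handler maps and syncs a single page per entry, so a
+ * cached buffer's sg_list must be built from page-sized entries.
+ */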
+ if (buffer->flags & ION_FLAG_CACHED)
+ for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
+ i) {
+ if (sg_dma_len(sg) == PAGE_SIZE)
+ continue;
+ pr_err("%s: cached mappings must have pagewise "
+ "sg_lists\n", __func__);
+ heap->ops->unmap_dma(heap, buffer);
+ kfree(buffer);
+ return ERR_PTR(-EINVAL);
+ }
+ ret = ion_buffer_alloc_dirty(buffer);
+ if (ret) {
+ heap->ops->unmap_dma(heap, buffer);
+ heap->ops->free(buffer);
+ kfree(buffer);
+ return ERR_PTR(ret);
+ }
+
+ INIT_LIST_HEAD(&buffer->vmas);
mutex_init(&buffer->lock);
/* this will set up dma addresses for the sglist -- it is not
technically correct as per the dma api -- a specific
}
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
- size_t align, unsigned int flags)
+ size_t align, unsigned int heap_mask,
+ unsigned int flags)
{
struct rb_node *n;
struct ion_handle *handle;
struct ion_device *dev = client->dev;
struct ion_buffer *buffer = NULL;
+ pr_debug("%s: len %d align %d heap_mask %u flags %x\n", __func__, len,
+ align, heap_mask, flags);
/*
* traverse the list of heaps available in this system in priority
* order. If the heap type is supported by the client, and matches the
if (!((1 << heap->type) & client->heap_mask))
continue;
/* if the caller didn't specify this heap type */
- if (!((1 << heap->id) & flags))
+ if (!((1 << heap->id) & heap_mask))
continue;
buffer = ion_buffer_create(heap, dev, len, align, flags);
if (!IS_ERR_OR_NULL(buffer))
return table;
}
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+ struct device *dev,
+ enum dma_data_direction direction);
+
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction direction)
{
struct dma_buf *dmabuf = attachment->dmabuf;
struct ion_buffer *buffer = dmabuf->priv;
+ if (buffer->flags & ION_FLAG_CACHED)
+ ion_buffer_sync_for_device(buffer, attachment->dev, direction);
return buffer->sg_table;
}
{
}
+static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
+{
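+ /* one dirty bit per page-sized sg entry, rounded up to whole longs */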
+ unsigned long pages = buffer->sg_table->nents;
+ unsigned long length = BITS_TO_LONGS(pages);
+
+ buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
+ if (!buffer->dirty)
+ return -ENOMEM;
+ return 0;
+}
+
+struct ion_vma_list {
+ struct list_head list;
+ struct vm_area_struct *vma;
+};
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+ struct device *dev,
+ enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+ struct ion_vma_list *vma_list;
+
+ pr_debug("%s: syncing for device %s\n", __func__,
+ dev ? dev_name(dev) : "null");
+ mutex_lock(&buffer->lock);
+ for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
+ if (!test_bit(i, buffer->dirty))
+ continue;
+ dma_sync_sg_for_device(dev, sg, 1, dir);
+ clear_bit(i, buffer->dirty);
+ }
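+ /*
+ * Zap this buffer out of every live userspace vma so the next CPU
+ * access refaults and marks the touched pages dirty again.
+ */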
+ list_for_each_entry(vma_list, &buffer->vmas, list) {
+ struct vm_area_struct *vma = vma_list->vma;
+
+ zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
+ NULL);
+ }
+ mutex_unlock(&buffer->lock);
+}
+
+static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct ion_buffer *buffer = vma->vm_private_data;
+ struct scatterlist *sg;
+ int i;
+
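+ /*
+ * Pages of a cached buffer are mapped on demand, one at a time:
+ * mark the faulting page dirty so it gets synced before the next
+ * dma, sync it for the cpu, then insert it into the vma.
+ */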
+ mutex_lock(&buffer->lock);
+ set_bit(vmf->pgoff, buffer->dirty);
+
+ for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
+ if (i != vmf->pgoff)
+ continue;
+ dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
+ vm_insert_page(vma, (unsigned long)vmf->virtual_address,
+ sg_page(sg));
+ break;
+ }
+ mutex_unlock(&buffer->lock);
+ return VM_FAULT_NOPAGE;
+}
+
+static void ion_vm_open(struct vm_area_struct *vma)
+{
+ struct ion_buffer *buffer = vma->vm_private_data;
+ struct ion_vma_list *vma_list;
+
+ vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
+ if (!vma_list)
+ return;
+ vma_list->vma = vma;
+ mutex_lock(&buffer->lock);
+ list_add(&vma_list->list, &buffer->vmas);
+ mutex_unlock(&buffer->lock);
+ pr_debug("%s: adding %p\n", __func__, vma);
+}
+
+static void ion_vm_close(struct vm_area_struct *vma)
+{
+ struct ion_buffer *buffer = vma->vm_private_data;
+ struct ion_vma_list *vma_list, *tmp;
+
+ pr_debug("%s\n", __func__);
+ mutex_lock(&buffer->lock);
+ list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
+ if (vma_list->vma != vma)
+ continue;
+ list_del(&vma_list->list);
+ kfree(vma_list);
+ pr_debug("%s: deleting %p\n", __func__, vma);
+ break;
+ }
+ mutex_unlock(&buffer->lock);
+}
+
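+/*
+ * vm_ops for cached buffers: open/close keep buffer->vmas up to date so
+ * ion_buffer_sync_for_device() can zap every live mapping, and fault
+ * populates pages lazily while recording which ones the cpu dirtied.
+ */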
+static struct vm_operations_struct ion_vma_ops = {
+ .open = ion_vm_open,
+ .close = ion_vm_close,
+ .fault = ion_vm_fault,
+};
+
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
struct ion_buffer *buffer = dmabuf->priv;
- int ret;
+ int ret = 0;
if (!buffer->heap->ops->map_user) {
pr_err("%s: this heap does not define a method for mapping "
return -EINVAL;
}
- mutex_lock(&buffer->lock);
- /* now map it to userspace */
- ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
- mutex_unlock(&buffer->lock);
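+ /*
+ * Cached buffers get no pages up front: the fault handler inserts
+ * and syncs pages one at a time, tracking dirty state. Uncached
+ * buffers are mapped in full here with a writecombined pgprot.
+ */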
+ if (buffer->flags & ION_FLAG_CACHED) {
+ vma->vm_private_data = buffer;
+ vma->vm_ops = &ion_vma_ops;
+ ion_vm_open(vma);
+ } else {
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ mutex_lock(&buffer->lock);
+ /* now map it to userspace */
+ ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
+ mutex_unlock(&buffer->lock);
+ }
if (ret)
pr_err("%s: failure mapping buffer to userspace\n",
if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
return -EFAULT;
data.handle = ion_alloc(client, data.len, data.align,
- data.flags);
+ data.heap_mask, data.flags);
if (IS_ERR(data.handle))
return PTR_ERR(data.handle);
#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
+/**
+ * heap flags - the lower 16 bits are used by core ion, the upper 16
+ * bits are reserved for use by the heaps themselves.
+ */
+#define ION_FLAG_CACHED 1 /* mappings of this buffer should be
+ cached, ion will do cache
+ maintenance when the buffer is
+ mapped for dma */
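+/*
+ * Example flag word (a sketch: MY_HEAP_PRIVATE_FLAG stands in for a
+ * hypothetical heap-defined flag, not something this patch adds):
+ *
+ *	unsigned int flags = ION_FLAG_CACHED | (MY_HEAP_PRIVATE_FLAG << 16);
+ */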
+
#ifdef __KERNEL__
struct ion_device;
struct ion_heap;
* @len: size of the allocation
* @align: requested allocation alignment, lots of hardware blocks have
* alignment requirements of some kind
- * @flags: mask of heaps to allocate from, if multiple bits are set
+ * @heap_mask: mask of heaps to allocate from, if multiple bits are set
* heaps will be tried in order from lowest to highest order bit
+ * @flags: heap flags, the low 16 bits are consumed by ion, the high 16
+ * bits are passed on to the respective heap and can be
+ * heap-specific
*
* Allocate memory in one of the heaps provided in heap mask and return
* an opaque handle to it.
*/
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
- size_t align, unsigned int flags);
+ size_t align, unsigned int heap_mask,
+ unsigned int flags);
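+
+/*
+ * A minimal usage sketch, assuming a client obtained from
+ * ion_client_create() and a contiguous system heap present:
+ *
+ *	struct ion_handle *handle;
+ *
+ *	handle = ion_alloc(client, SZ_4K, SZ_4K,
+ *			   ION_HEAP_SYSTEM_CONTIG_MASK, ION_FLAG_CACHED);
+ *	if (IS_ERR(handle))
+ *		return PTR_ERR(handle);
+ */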
/**
* ion_free - free a handle
* struct ion_allocation_data - metadata passed from userspace for allocations
* @len: size of the allocation
* @align: required alignment of the allocation
+ * @heap_mask: mask of heaps to allocate from
* @flags: flags passed to heap
* @handle: pointer that will be populated with a cookie to use to refer
* to this allocation
struct ion_allocation_data {
size_t len;
size_t align;
+ unsigned int heap_mask;
unsigned int flags;
struct ion_handle *handle;
};
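+
+/*
+ * Userspace sketch (assuming the standard ION_IOC_ALLOC ioctl on an open
+ * /dev/ion fd; error handling trimmed):
+ *
+ *	struct ion_allocation_data data = {
+ *		.len = 4096,
+ *		.align = 4096,
+ *		.heap_mask = ION_HEAP_SYSTEM_CONTIG_MASK,
+ *		.flags = ION_FLAG_CACHED,
+ *	};
+ *
+ *	if (ioctl(ion_fd, ION_IOC_ALLOC, &data) < 0)
+ *		return -errno;
+ *
+ * On success, data.handle identifies the new allocation.
+ */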
struct scatterlist *sg;
int i;
void *vaddr;
+ pgprot_t pgprot;
struct sg_table *table = buffer->priv_virt;
struct page **pages = kmalloc(sizeof(struct page *) * table->nents,
GFP_KERNEL);
for_each_sg(table->sgl, sg, table->nents, i)
pages[i] = sg_page(sg);
- vaddr = vmap(pages, table->nents, VM_MAP, PAGE_KERNEL);
+
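+ /*
+ * Match the kernel mapping to the buffer's cacheability: cached
+ * buffers get a normal cacheable mapping, anything else is mapped
+ * writecombined so the kernel view stays coherent with dma.
+ */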
+ if (buffer->flags & ION_FLAG_CACHED)
+ pgprot = PAGE_KERNEL;
+ else
+ pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+ vaddr = vmap(pages, table->nents, VM_MAP, pgprot);
kfree(pages);
return vaddr;
}
struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
- struct ion_buffer *buffer)
+ struct ion_buffer *buffer)
{
struct sg_table *table;
int ret;
return table;
}
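+
+/*
+ * Teardown counterpart of ion_system_contig_heap_map_dma() above: the
+ * table owns no pages of its own, so unmapping is just sg_free_table()
+ * plus freeing the table itself.
+ */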
+void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
+ struct ion_buffer *buffer)
+{
+ sg_free_table(buffer->sg_table);
+ kfree(buffer->sg_table);
+}
+
int ion_system_contig_heap_map_user(struct ion_heap *heap,
struct ion_buffer *buffer,
struct vm_area_struct *vma)
.free = ion_system_contig_heap_free,
.phys = ion_system_contig_heap_phys,
.map_dma = ion_system_contig_heap_map_dma,
- .unmap_dma = ion_system_heap_unmap_dma,
+ .unmap_dma = ion_system_contig_heap_unmap_dma,
.map_kernel = ion_system_heap_map_kernel,
.unmap_kernel = ion_system_heap_unmap_kernel,
.map_user = ion_system_contig_heap_map_user,