buffer->dev = dev;
buffer->size = len;
- INIT_LIST_HEAD(&buffer->attachments);
mutex_init(&buffer->lock);
mutex_lock(&dev->buffer_lock);
ion_buffer_add(dev, buffer);
static void free_duped_table(struct sg_table *table)
{
	sg_free_table(table);
	kfree(table);
}
-struct ion_dma_buf_attachment {
- struct device *dev;
- struct sg_table *table;
- struct list_head list;
-};
-
static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev,
struct dma_buf_attachment *attachment)
{
- struct ion_dma_buf_attachment *a;
struct sg_table *table;
struct ion_buffer *buffer = dmabuf->priv;
- a = kzalloc(sizeof(*a), GFP_KERNEL);
- if (!a)
- return -ENOMEM;
-
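+	/* Give each attachment its own copy of the buffer's scatterlist. */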
table = dup_sg_table(buffer->sg_table);
- if (IS_ERR(table)) {
- kfree(a);
+ if (IS_ERR(table))
return -ENOMEM;
- }
- a->table = table;
- a->dev = dev;
- INIT_LIST_HEAD(&a->list);
-
- attachment->priv = a;
-
- mutex_lock(&buffer->lock);
- list_add(&a->list, &buffer->attachments);
- mutex_unlock(&buffer->lock);
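+	/*
+	 * Keep the duplicated table as the attachment's only private state;
+	 * the dma-buf core already tracks every attachment on
+	 * dmabuf->attachments, so no extra bookkeeping struct or list is
+	 * needed here.
+	 */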
+ attachment->priv = table;
return 0;
}
static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
struct dma_buf_attachment *attachment)
{
- struct ion_dma_buf_attachment *a = attachment->priv;
- struct ion_buffer *buffer = dmabuf->priv;
-
- free_duped_table(a->table);
- mutex_lock(&buffer->lock);
- list_del(&a->list);
- mutex_unlock(&buffer->lock);
-
- kfree(a);
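+	/* The duplicated sg_table is the only per-attachment state to free. */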
+ free_duped_table(attachment->priv);
}
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction direction)
{
- struct ion_dma_buf_attachment *a = attachment->priv;
- struct sg_table *table;
-
- table = a->table;
+ struct sg_table *table = attachment->priv;
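+
+	/* Map this attachment's private copy of the scatterlist. */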
if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
			direction))
		return ERR_PTR(-ENOMEM);

	return table;
}
static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
void *vaddr;
- struct ion_dma_buf_attachment *a;
+ struct dma_buf_attachment *att;
/*
	 * TODO: Move this elsewhere because we don't always need a vaddr
	 */
	if (buffer->heap->ops->map_kernel) {
		mutex_lock(&buffer->lock);
		vaddr = ion_buffer_kmap_get(buffer);
		mutex_unlock(&buffer->lock);
	}
- mutex_lock(&buffer->lock);
- list_for_each_entry(a, &buffer->attachments, list) {
- dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
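+	/*
+	 * Sync every attached device's mapping for the CPU; dmabuf->lock
+	 * serializes this walk against dma_buf_attach()/dma_buf_detach().
+	 */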
+ mutex_lock(&dmabuf->lock);
+ list_for_each_entry(att, &dmabuf->attachments, node) {
+ struct sg_table *table = att->priv;
+
+ dma_sync_sg_for_cpu(att->dev, table->sgl, table->nents,
direction);
}
- mutex_unlock(&buffer->lock);
+ mutex_unlock(&dmabuf->lock);
return 0;
}
static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					enum dma_data_direction direction)
{
struct ion_buffer *buffer = dmabuf->priv;
- struct ion_dma_buf_attachment *a;
+ struct dma_buf_attachment *att;
if (buffer->heap->ops->map_kernel) {
		mutex_lock(&buffer->lock);
		ion_buffer_kmap_put(buffer);
		mutex_unlock(&buffer->lock);
}
- mutex_lock(&buffer->lock);
- list_for_each_entry(a, &buffer->attachments, list) {
- dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
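+	/*
+	 * Hand the pages back to the devices: sync every attachment's
+	 * mapping for the device before DMA resumes.
+	 */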
+ mutex_lock(&dmabuf->lock);
+ list_for_each_entry(att, &dmabuf->attachments, node) {
+ struct sg_table *table = att->priv;
+
+ dma_sync_sg_for_device(att->dev, table->sgl, table->nents,
direction);
}
- mutex_unlock(&buffer->lock);
+ mutex_unlock(&dmabuf->lock);
return 0;
}