#include <linux/sched/task.h>
#include "ion.h"
+#include "ion_exynos.h"
static struct ion_device *internal_dev;
static int heap_id;
buffer->dev = dev;
buffer->size = len;
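+ /* Exynos extension: map the buffer so each segment gets a dma address */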
+ ret = exynos_ion_alloc_fixup(dev, buffer);
+ if (ret < 0)
+ goto err1;
+
INIT_LIST_HEAD(&buffer->iovas);
mutex_init(&buffer->lock);
mutex_lock(&dev->buffer_lock);
void ion_buffer_destroy(struct ion_buffer *buffer)
{
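+ /* Exynos extension: drop the dma mapping before the pages are freed */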
+ exynos_ion_free_fixup(buffer);
if (WARN_ON(buffer->kmap_cnt > 0))
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
buffer->heap->ops->free(buffer);
}
debugfs_done:
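+ /* Exynos extension: widen the DMA capability of the ION misc device */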
+ exynos_ion_fixup(idev);
idev->buffers = RB_ROOT;
mutex_init(&idev->buffer_lock);
init_rwsem(&idev->lock);
WARN(1, "iova %pad found for %s\n", &iova, dev_name(attachment->dev));
}
+
+/*
+ * exynos_ion_fixup - set up DMA capability of the ion_device for the Exynos extensions
+ */
+void exynos_ion_fixup(struct ion_device *idev)
+{
+ struct device *dev = idev->dev.this_device;
+
+ /*
+ * The dma-mapping API only works on devices that support DMA. dma_map_sg()
+ * and friends fall back to swiotlb bounce buffers when the dma mask of the
+ * given device cannot reach the whole physical address space. Setting up
+ * the dma ops and a 36-bit mask here makes the ARM64 dma-mapping code treat
+ * this device as if it had full access to memory.
+ * See ion_buffer_create() and ion_buffer_destroy().
+ */
+ arch_setup_dma_ops(dev, 0x0ULL, 1ULL << 36, NULL, false);
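+ /* the misc device has no dma_mask of its own; reuse coherent_dma_mask */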
+ dev->dma_mask = &dev->coherent_dma_mask;
+ dma_set_mask(dev, DMA_BIT_MASK(36));
+}
+
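+/*
+ * exynos_ion_alloc_fixup - assign dma addresses to a newly created buffer
+ *
+ * Maps buffer->sg_table on the ION misc device so that every scatter-gather
+ * segment carries a valid dma_address. Only the addresses are needed here,
+ * so DMA_ATTR_SKIP_CPU_SYNC skips the cache maintenance that mapping would
+ * otherwise do; that is left to the eventual users of the buffer.
+ */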
+int exynos_ion_alloc_fixup(struct ion_device *idev, struct ion_buffer *buffer)
+{
+ struct sg_table *table = buffer->sg_table;
+ int nents;
+
+ /* assign a dma_address to each scatter-gather list entry */
+ nents = dma_map_sg_attrs(idev->dev.this_device, table->sgl,
+ table->orig_nents, DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
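+ /*
+ * dma_map_sg_attrs() returns 0 on failure and may return fewer entries
+ * than orig_nents when adjacent segments are merged; both cases are
+ * treated as a failed mapping here.
+ */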
+ if (nents < table->orig_nents) {
+ pr_err("%s: failed dma_map_sg(nents %d)=nents %d\n",
+ __func__, table->orig_nents, nents);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
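+/*
+ * exynos_ion_free_fixup - undo exynos_ion_alloc_fixup()
+ *
+ * Unmaps buffer->sg_table from the ION misc device. ion_buffer_destroy()
+ * calls this before the heap frees the pages so that no stale dma mapping
+ * outlives the buffer.
+ */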
+void exynos_ion_free_fixup(struct ion_buffer *buffer)
+{
+ struct sg_table *table = buffer->sg_table;
+
+ dma_unmap_sg_attrs(buffer->dev->dev.this_device, table->sgl,
+ table->orig_nents, DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+}
struct dma_buf;
struct ion_heap;
struct ion_platform_heap;
+struct ion_device;
+struct ion_buffer;
/**
* struct ion_buffer_prot_info - buffer protection information
void *ion_buffer_protect_single(unsigned int protection_id, unsigned int size,
unsigned long phys, unsigned int protalign);
void ion_buffer_unprotect(void *priv);
+void exynos_ion_fixup(struct ion_device *idev);
+int exynos_ion_alloc_fixup(struct ion_device *idev, struct ion_buffer *buffer);
+void exynos_ion_free_fixup(struct ion_buffer *buffer);
+
#else
static inline void *ion_buffer_protect_single(unsigned int protection_id,
unsigned int size,
return NULL;
}
#define ion_buffer_unprotect(priv) do { } while (0)
+#define exynos_ion_fixup(idev) do { } while (0)
+static inline int exynos_ion_alloc_fixup(struct ion_device *idev,
+ struct ion_buffer *buffer)
+{
+ return 0;
+}
+
+#define exynos_ion_free_fixup(buffer) do { } while (0)
#endif
extern const struct dma_buf_ops ion_dma_buf_ops;
struct ion_heap *ion_get_heap_by_name(const char *heap_name);
struct dma_buf *__ion_alloc(size_t len, unsigned int heap_id_mask,
unsigned int flags);
+
#endif /* _ION_EXYNOS_H_ */