Clients of ION in userland can specify if the allocated buffer is
written by H/W like GPU before S/W initializes it. If the cache
maintenance is fully supported by all H/Ws that touch the buffer or
all H/Ws touching the buffer are in the same cache coherency domain,
everything goes right. But a problem arises with drivers that do not
perform cache maintenance, for performance reasons.
For different H/Ws that touch the same buffer from different cache
coherency domains, the drivers should take care when creating IO
memory mapping. ION_FLAG_MAY_HWRENDER and ion_hwrender_dmabuf() helper
give information to such drivers if the buffer is initialized by H/W.
ION_FLAG_MAY_HWRENDER has no effect inside ION itself; it exists for
documentation purposes only.
Change-Id: Iab6293f2c5d6017cf67378356721de4256f049b9
Signed-off-by: Cho KyongHo <pullip.cho@samsung.com>
return ion_buffer_cached(dmabuf->priv);
}
+/*
+ * ion_hwrender_dmabuf - report whether the ION buffer backing @dmabuf was
+ * allocated with ION_FLAG_MAY_HWRENDER, i.e. it may be written by H/W
+ * (e.g. GPU) before S/W initializes it.
+ *
+ * NOTE(review): assumes @dmabuf was exported by ION so that dmabuf->priv
+ * is a struct ion_buffer — callers must not pass foreign dma-bufs.
+ */
+bool ion_hwrender_dmabuf(struct dma_buf *dmabuf)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ return !!(buffer->flags & ION_FLAG_MAY_HWRENDER);
+}
+
struct ion_iovm_map {
struct list_head list;
struct device *dev;
* overhead by explicit cache maintenance.
*/
#define ION_FLAG_SYNC_FORCE 32
+/*
+ * The allocated buffer may be written by H/W (e.g. GPU) before S/W
+ * initializes it (apart from any buffer initialization ION performs at
+ * allocation time). Informational only: ION itself does not act on this
+ * flag; drivers query it via ion_hwrender_dmabuf().
+ */
+#define ION_FLAG_MAY_HWRENDER 64
/**
* DOC: Ion Userspace API
struct dma_buf *ion_alloc_dmabuf(const char *heap_name,
size_t len, unsigned int flags);
bool ion_cached_dmabuf(struct dma_buf *dmabuf);
+bool ion_hwrender_dmabuf(struct dma_buf *dmabuf);
#else
static inline struct dma_buf *ion_alloc_dmabuf(const char *heap_name,
size_t len, unsigned int flags)
{
return ERR_PTR(-ENODEV);
}
+
static inline bool ion_cached_dmabuf(struct dma_buf *dmabuf)
{
return false;
}
+
+/*
+ * Stub for the !CONFIG case: the name must match the ion_hwrender_dmabuf()
+ * prototype declared in the enabled branch, otherwise callers built without
+ * the config fail to compile/link. (Original patch wrongly named this
+ * ion_cached_hwrender.)
+ */
+static inline bool ion_hwrender_dmabuf(struct dma_buf *dmabuf)
+{
+ return false;
+}
#endif
#if defined(CONFIG_EXYNOS_IOVMM) && defined(CONFIG_ION_EXYNOS)