#define MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB * 256)
#define MALI_OS_MEMORY_POOL_TRIM_JIFFIES (10 * CONFIG_HZ) /* Default to 10s */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
+static unsigned long mali_dma_attrs;
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
/* Write combine dma_attrs */
static DEFINE_DMA_ATTRS(dma_attrs_wc);
#endif
spin_unlock(&mali_mem_page_table_page_pool.lock);
if (_MALI_OSK_ERR_OK != ret) {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
+ *mapping = dma_alloc_attrs(&mali_platform_device->dev,
+ _MALI_OSK_MALI_PAGE_SIZE, &tmp_phys,
+ GFP_KERNEL, mali_dma_attrs);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
*mapping = dma_alloc_attrs(&mali_platform_device->dev,
_MALI_OSK_MALI_PAGE_SIZE, &tmp_phys,
GFP_KERNEL, &dma_attrs_wc);
} else {
spin_unlock(&mali_mem_page_table_page_pool.lock);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
+ dma_free_attrs(&mali_platform_device->dev,
+ _MALI_OSK_MALI_PAGE_SIZE, virt, phys,
+ mali_dma_attrs);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
dma_free_attrs(&mali_platform_device->dev,
_MALI_OSK_MALI_PAGE_SIZE, virt, phys,
&dma_attrs_wc);
/* After releasing the spinlock: free the pages we removed from the pool. */
for (i = 0; i < nr_to_free; i++) {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
+ dma_free_attrs(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE,
+ virt_arr[i], (dma_addr_t)phys_arr[i], mali_dma_attrs);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
dma_free_attrs(&mali_platform_device->dev, _MALI_OSK_MALI_PAGE_SIZE,
virt_arr[i], (dma_addr_t)phys_arr[i], &dma_attrs_wc);
#else
return _MALI_OSK_ERR_NOMEM;
}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
+ mali_dma_attrs = DMA_ATTR_WRITE_COMBINE;
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &dma_attrs_wc);
#endif
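
For reference, the version ladder these hunks repeat at every call site can be folded into a single pair of helpers. The sketch below is illustrative only and not part of either driver: the mali_compat_* names are invented here, and the cutoffs simply mirror the ones this patch uses (4.7.0 for the unsigned-long attrs API, as above; 3.5.0 for struct dma_attrs, as in the kbase hunks below; and the dedicated dma_alloc_writecombine()/dma_free_writecombine() helpers before dma_*_attrs() existed). Passing DMA_ATTR_WRITE_COMBINE directly also makes a cached static such as mali_dma_attrs unnecessary.

/*
 * Illustrative compatibility shim -- hypothetical, not driver code.
 * Cutoffs mirror this patch: >= 4.7.0 takes attrs as a plain
 * unsigned long bitmask, older kernels as a struct dma_attrs pointer.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/version.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) && \
	(LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0))
#include <linux/dma-attrs.h>
#endif

static inline void *mali_compat_alloc_wc(struct device *dev, size_t size,
					 dma_addr_t *dma_handle, gfp_t gfp)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
	/* attrs are a plain bitmask; pass the flag directly */
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			       DMA_ATTR_WRITE_COMBINE);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
	/* attrs live in a struct that must be initialised first */
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
#else
	/* before dma_*_attrs(), write-combine had a dedicated helper */
	return dma_alloc_writecombine(dev, size, dma_handle, gfp);
#endif
}

static inline void mali_compat_free_wc(struct device *dev, size_t size,
				       void *cpu_addr, dma_addr_t dma_handle)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
	dma_free_attrs(dev, size, cpu_addr, dma_handle,
		       DMA_ATTR_WRITE_COMBINE);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
#else
	dma_free_writecombine(dev, size, cpu_addr, dma_handle);
#endif
}
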
#include <linux/fs.h>
#include <linux/version.h>
#include <linux/dma-mapping.h>
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0))
#include <linux/dma-attrs.h>
-#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) */
+#endif /* 3.5.0 <= LINUX_VERSION_CODE < 4.7.0 */
#ifdef CONFIG_DMA_SHARED_BUFFER
dma_addr_t dma_pa;
struct kbase_va_region *reg;
phys_addr_t *page_array;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0))
DEFINE_DMA_ATTRS(attrs);
#endif
goto err;
/* All the alloc calls return zeroed memory */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
+ va = dma_alloc_attrs(kctx->kbdev->dev, size, &dma_pa, GFP_KERNEL,
+ DMA_ATTR_WRITE_COMBINE);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
va = dma_alloc_attrs(kctx->kbdev->dev, size, &dma_pa, GFP_KERNEL, &attrs);
#else
no_alloc:
kfree(reg);
no_reg:
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
+ dma_free_attrs(kctx->kbdev->dev, size, va, dma_pa, DMA_ATTR_WRITE_COMBINE);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
dma_free_attrs(kctx->kbdev->dev, size, va, dma_pa, &attrs);
#else
dma_free_writecombine(kctx->kbdev->dev, size, va, dma_pa);
{
struct kbase_va_region *reg;
int err;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0))
DEFINE_DMA_ATTRS(attrs);
#endif
kbase_mem_phy_alloc_put(reg->gpu_alloc);
kfree(reg);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
+ dma_free_attrs(kctx->kbdev->dev, handle->size,
+ handle->cpu_va, handle->dma_pa,
+ DMA_ATTR_WRITE_COMBINE);
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0))
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
dma_free_attrs(kctx->kbdev->dev, handle->size,
handle->cpu_va, handle->dma_pa, &attrs);
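
Under the same assumptions (the hypothetical mali_compat_* helpers and includes sketched after the first file's hunks), each call site in either file then reduces to something like:

/* Usage sketch: one write-combined page, allocated and released with
 * the version handling hidden behind the hypothetical helpers above. */
static int example_page_table_page(struct device *dev)
{
	dma_addr_t phys;
	void *virt;

	virt = mali_compat_alloc_wc(dev, PAGE_SIZE, &phys, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	/* ... hand phys to the GPU MMU, access virt from the CPU ... */

	mali_compat_free_wc(dev, PAGE_SIZE, virt, phys);
	return 0;
}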