diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 4bd7579ec9e6029746cb0fc485a255056fbaf2ff..8dffd87c9d32225636552120f877bbe0f62ccd0b 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -21,6 +21,7 @@
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
 #include <linux/vmalloc.h>
 #include <linux/swiotlb.h>
 
@@ -29,26 +30,217 @@
 struct dma_map_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
-static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
-                                         dma_addr_t *dma_handle, gfp_t flags,
-                                         struct dma_attrs *attrs)
+static void *__dma_alloc_coherent(struct device *dev, size_t size,
+                                 dma_addr_t *dma_handle, gfp_t flags,
+                                 struct dma_attrs *attrs)
 {
-       if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
+       if (dev == NULL) {
+               WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
+               return NULL;
+       }
+
+       if (IS_ENABLED(CONFIG_ZONE_DMA) &&
            dev->coherent_dma_mask <= DMA_BIT_MASK(32))
-               flags |= GFP_DMA32;
-       return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+               flags |= GFP_DMA;
+       if (IS_ENABLED(CONFIG_DMA_CMA)) {
+               struct page *page;
+
+               size = PAGE_ALIGN(size);
+               page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+                                                       get_order(size));
+               if (!page)
+                       return NULL;
+
+               *dma_handle = phys_to_dma(dev, page_to_phys(page));
+               return page_address(page);
+       } else {
+               return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+       }
+}
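
The CMA/swiotlb split above is invisible at the call site: a driver just calls dma_alloc_coherent(), which dispatches into these ops. A minimal driver-side sketch; example_probe() and the SZ_4K size are illustrative, not part of this patch.

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static int example_probe(struct device *dev)
{
        dma_addr_t bus;
        void *cpu;

        /* a 32-bit coherent mask makes __dma_alloc_coherent() add GFP_DMA */
        if (dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
                return -EIO;

        cpu = dma_alloc_coherent(dev, SZ_4K, &bus, GFP_KERNEL);
        if (!cpu)
                return -ENOMEM;

        /* ... hand `bus` to the device, use `cpu` from the kernel ... */

        dma_free_coherent(dev, SZ_4K, cpu, bus);
        return 0;
}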
+
+static void __dma_free_coherent(struct device *dev, size_t size,
+                               void *vaddr, dma_addr_t dma_handle,
+                               struct dma_attrs *attrs)
+{
+       if (dev == NULL) {
+               WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
+               return;
+       }
+
+       if (IS_ENABLED(CONFIG_DMA_CMA)) {
+               phys_addr_t paddr = dma_to_phys(dev, dma_handle);
+
+               dma_release_from_contiguous(dev,
+                                       phys_to_page(paddr),
+                                       size >> PAGE_SHIFT);
+       } else {
+               swiotlb_free_coherent(dev, size, vaddr, dma_handle);
+       }
+}
+
+static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
+                                    dma_addr_t *dma_handle, gfp_t flags,
+                                    struct dma_attrs *attrs)
+{
+       struct page *page, **map;
+       void *ptr, *coherent_ptr;
+       int order, i;
+
+       size = PAGE_ALIGN(size);
+       order = get_order(size);
+
+       ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
+       if (!ptr)
+               goto no_mem;
+       map = kmalloc(sizeof(struct page *) << order,
+                       flags & ~(GFP_DMA | GFP_DMA32));
+       if (!map)
+               goto no_map;
+
+       /* remove any dirty cache lines on the kernel alias */
+       __dma_flush_range(ptr, ptr + size);
+
+       /* create a coherent mapping */
+       page = virt_to_page(ptr);
+       for (i = 0; i < (size >> PAGE_SHIFT); i++)
+               map[i] = page + i;
+       coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
+                           pgprot_dmacoherent(pgprot_default));
+       kfree(map);
+       if (!coherent_ptr)
+               goto no_map;
+
+       return coherent_ptr;
+
+no_map:
+       __dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
+no_mem:
+       *dma_handle = ~0;
+       return NULL;
+}
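
__dma_alloc_noncoherent() gets an uncacheable buffer without touching the linear map: flush the cacheable kernel alias, then vmap() the same pages with non-cacheable attributes. The same pattern in isolation, as a sketch: example_remap_noncached() is a hypothetical helper, and pgprot_writecombine() stands in for the pgprot_dmacoherent(pgprot_default) used above.

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *example_remap_noncached(struct page *page, size_t size)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct page **pages;
        unsigned int i;
        void *vaddr;

        pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return NULL;

        /* the run is physically contiguous, so the pages are consecutive */
        for (i = 0; i < count; i++)
                pages[i] = page + i;

        vaddr = vmap(pages, count, VM_MAP, pgprot_writecombine(PAGE_KERNEL));

        /* vmap() only needs the array while it builds the page tables */
        kfree(pages);
        return vaddr;           /* NULL if vmap() failed */
}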
+
+static void __dma_free_noncoherent(struct device *dev, size_t size,
+                                  void *vaddr, dma_addr_t dma_handle,
+                                  struct dma_attrs *attrs)
+{
+       void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
+
+       vunmap(vaddr);
+       __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
+}
+
+static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
+                                    unsigned long offset, size_t size,
+                                    enum dma_data_direction dir,
+                                    struct dma_attrs *attrs)
+{
+       dma_addr_t dev_addr;
+
+       dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
+       __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+
+       return dev_addr;
+}
+
+
+static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
+                                size_t size, enum dma_data_direction dir,
+                                struct dma_attrs *attrs)
+{
+       __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+       swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
+}
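
The map/unmap hooks bracket swiotlb's bounce handling with the cache maintenance a non-coherent master needs. From a driver this is the usual streaming-DMA ownership protocol; a sketch, with example_tx() purely hypothetical.

#include <linux/dma-mapping.h>

static void example_tx(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle;

        /* __swiotlb_map_page() cleans the cache so the device
         * reads up-to-date data */
        handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, handle))
                return;

        /* ... start the transfer and wait for it to finish ... */

        /* __swiotlb_unmap_page() hands ownership back to the CPU */
        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}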
+
+static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+                                 int nelems, enum dma_data_direction dir,
+                                 struct dma_attrs *attrs)
+{
+       struct scatterlist *sg;
+       int i, ret;
+
+       ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
+       for_each_sg(sgl, sg, ret, i)
+               __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+                              sg->length, dir);
+
+       return ret;
 }
 
-static void arm64_swiotlb_free_coherent(struct device *dev, size_t size,
-                                       void *vaddr, dma_addr_t dma_handle,
-                                       struct dma_attrs *attrs)
+static void __swiotlb_unmap_sg_attrs(struct device *dev,
+                                    struct scatterlist *sgl, int nelems,
+                                    enum dma_data_direction dir,
+                                    struct dma_attrs *attrs)
 {
-       swiotlb_free_coherent(dev, size, vaddr, dma_handle);
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sgl, sg, nelems, i)
+               __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+                                sg->length, dir);
+       swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
 }
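
The scatter-gather variants apply the same per-segment maintenance before or after deferring to swiotlb. For reference, the driver-side counterpart, sketched with a hypothetical example_map_table():

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_map_table(struct device *dev, struct scatterlist *sgl,
                             int nents)
{
        struct scatterlist *sg;
        int i, mapped;

        mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
        if (!mapped)
                return -EIO;

        /* sg_dma_address()/sg_dma_len() are what the device sees */
        for_each_sg(sgl, sg, mapped, i)
                dev_dbg(dev, "seg %d: %llx+%u\n", i,
                        (unsigned long long)sg_dma_address(sg),
                        sg_dma_len(sg));

        dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
        return 0;
}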
 
-static struct dma_map_ops arm64_swiotlb_dma_ops = {
-       .alloc = arm64_swiotlb_alloc_coherent,
-       .free = arm64_swiotlb_free_coherent,
+static void __swiotlb_sync_single_for_cpu(struct device *dev,
+                                         dma_addr_t dev_addr, size_t size,
+                                         enum dma_data_direction dir)
+{
+       __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+       swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
+}
+
+static void __swiotlb_sync_single_for_device(struct device *dev,
+                                            dma_addr_t dev_addr, size_t size,
+                                            enum dma_data_direction dir)
+{
+       swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
+       __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+}
+
+static void __swiotlb_sync_sg_for_cpu(struct device *dev,
+                                     struct scatterlist *sgl, int nelems,
+                                     enum dma_data_direction dir)
+{
+       struct scatterlist *sg;
+       int i;
+
+       for_each_sg(sgl, sg, nelems, i)
+               __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+                                sg->length, dir);
+       swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
+}
+
+static void __swiotlb_sync_sg_for_device(struct device *dev,
+                                        struct scatterlist *sgl, int nelems,
+                                        enum dma_data_direction dir)
+{
+       struct scatterlist *sg;
+       int i;
+
+       swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
+       for_each_sg(sgl, sg, nelems, i)
+               __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+                              sg->length, dir);
+}
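
The sync hooks let a long-lived mapping pass ownership back and forth without remapping: the *_for_cpu direction invalidates stale lines, the *_for_device direction cleans them out. A sketch of that protocol; example_poll() is hypothetical.

#include <linux/dma-mapping.h>

static void example_poll(struct device *dev, dma_addr_t handle,
                         void *buf, size_t len)
{
        /* drop stale cache lines before the CPU inspects the buffer */
        dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

        /* ... read `buf` ... */

        /* give the buffer back to the device for the next transfer */
        dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}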
+
+struct dma_map_ops noncoherent_swiotlb_dma_ops = {
+       .alloc = __dma_alloc_noncoherent,
+       .free = __dma_free_noncoherent,
+       .map_page = __swiotlb_map_page,
+       .unmap_page = __swiotlb_unmap_page,
+       .map_sg = __swiotlb_map_sg_attrs,
+       .unmap_sg = __swiotlb_unmap_sg_attrs,
+       .sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
+       .sync_single_for_device = __swiotlb_sync_single_for_device,
+       .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
+       .sync_sg_for_device = __swiotlb_sync_sg_for_device,
+       .dma_supported = swiotlb_dma_supported,
+       .mapping_error = swiotlb_dma_mapping_error,
+};
+EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);
+
+struct dma_map_ops coherent_swiotlb_dma_ops = {
+       .alloc = __dma_alloc_coherent,
+       .free = __dma_free_coherent,
        .map_page = swiotlb_map_page,
        .unmap_page = swiotlb_unmap_page,
        .map_sg = swiotlb_map_sg_attrs,
@@ -60,10 +252,11 @@ static struct dma_map_ops arm64_swiotlb_dma_ops = {
        .dma_supported = swiotlb_dma_supported,
        .mapping_error = swiotlb_dma_mapping_error,
 };
+EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
 
 void __init arm64_swiotlb_init(void)
 {
-       dma_ops = &arm64_swiotlb_dma_ops;
+       dma_ops = &noncoherent_swiotlb_dma_ops;
        swiotlb_init(1);
 }
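
The boot default stays with the non-coherent table, the safe assumption on arm64, while coherent_swiotlb_dma_ops is exported for platforms that know better. On kernels of this vintage a per-device override goes through dev->archdata.dma_ops, which get_dma_ops() consults before falling back to the global dma_ops; a sketch, assuming that field:

#include <linux/device.h>

extern struct dma_map_ops coherent_swiotlb_dma_ops;
extern struct dma_map_ops noncoherent_swiotlb_dma_ops;

/* sketch: a bus that knows its masters are cache-coherent */
static void example_set_coherent(struct device *dev, bool coherent)
{
        dev->archdata.dma_ops = coherent ? &coherent_swiotlb_dma_ops
                                         : &noncoherent_swiotlb_dma_ops;
}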