sh: Add dma-mapping support for dma_alloc/free_coherent() overrides.
author Paul Mundt <lethal@linux-sh.org>
Mon, 26 Oct 2009 00:50:51 +0000 (09:50 +0900)
committer Paul Mundt <lethal@linux-sh.org>
Mon, 26 Oct 2009 00:50:51 +0000 (09:50 +0900)
This moves the current dma_alloc/free_coherent() calls to a generic
variant and plugs them in for the nommu default. Other variants can
override the defaults in the dma mapping ops directly.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
arch/sh/include/asm/dma-mapping.h
arch/sh/kernel/dma-nommu.c
arch/sh/mm/consistent.c

index b9a8f18f35a2f24fd6991eda77b15519ea352493..653076018df08d3b4aead0d755eb8eac4dbf3912 100644 (file)
@@ -9,6 +9,9 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
        return dma_ops;
 }
 
+#include <asm-generic/dma-coherent.h>
+#include <asm-generic/dma-mapping-common.h>
+
 static inline int dma_supported(struct device *dev, u64 mask)
 {
        struct dma_map_ops *ops = get_dma_ops(dev);
@@ -33,12 +36,6 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
        return 0;
 }
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
-                        dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
-                      void *vaddr, dma_addr_t dma_handle);
-
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                    enum dma_data_direction dir);
 
@@ -65,7 +62,42 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
        return dma_addr == 0;
 }
 
-#include <asm-generic/dma-coherent.h>
-#include <asm-generic/dma-mapping-common.h>
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+                                      dma_addr_t *dma_handle, gfp_t gfp)
+{
+       struct dma_map_ops *ops = get_dma_ops(dev);
+       void *memory;
+
+       if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
+               return memory;
+       if (!ops->alloc_coherent)
+               return NULL;
+
+       memory = ops->alloc_coherent(dev, size, dma_handle, gfp);
+       debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
+
+       return memory;
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+                                    void *vaddr, dma_addr_t dma_handle)
+{
+       struct dma_map_ops *ops = get_dma_ops(dev);
+
+       WARN_ON(irqs_disabled());       /* for portability */
+
+       if (dma_release_from_coherent(dev, get_order(size), vaddr))
+               return;
+
+       debug_dma_free_coherent(dev, size, vaddr, dma_handle);
+       if (ops->free_coherent)
+               ops->free_coherent(dev, size, vaddr, dma_handle);
+}
+
+/* arch/sh/mm/consistent.c */
+extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+                                       dma_addr_t *dma_addr, gfp_t flag);
+extern void dma_generic_free_coherent(struct device *dev, size_t size,
+                                     void *vaddr, dma_addr_t dma_handle);
 
 #endif /* __ASM_SH_DMA_MAPPING_H */
index e88fcebf860c0628b61cf1266c2a4b65073be90d..b336fcf40f1281f9c332aceadc4d3d64bb495eab 100644 (file)
@@ -61,6 +61,8 @@ static void nommu_sync_sg(struct device *dev, struct scatterlist *sg,
 }
 
 struct dma_map_ops nommu_dma_ops = {
+       .alloc_coherent         = dma_generic_alloc_coherent,
+       .free_coherent          = dma_generic_free_coherent,
        .map_page               = nommu_map_page,
        .map_sg                 = nommu_map_sg,
        .sync_single_for_device = nommu_sync_single,
index 1165161e472ccf731c04c3f45d5c7810df1d1e82..ef20bbabefa030bcf2e1b3fa9d8466e71a21fdd3 100644 (file)
@@ -33,15 +33,12 @@ static int __init dma_init(void)
 }
 fs_initcall(dma_init);
 
-void *dma_alloc_coherent(struct device *dev, size_t size,
-                          dma_addr_t *dma_handle, gfp_t gfp)
+void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+                                dma_addr_t *dma_handle, gfp_t gfp)
 {
        void *ret, *ret_nocache;
        int order = get_order(size);
 
-       if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
-               return ret;
-
        ret = (void *)__get_free_pages(gfp, order);
        if (!ret)
                return NULL;
@@ -63,30 +60,21 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 
        *dma_handle = virt_to_phys(ret);
 
-       debug_dma_alloc_coherent(dev, size, *dma_handle, ret_nocache);
-
        return ret_nocache;
 }
-EXPORT_SYMBOL(dma_alloc_coherent);
 
-void dma_free_coherent(struct device *dev, size_t size,
-                        void *vaddr, dma_addr_t dma_handle)
+void dma_generic_free_coherent(struct device *dev, size_t size,
+                              void *vaddr, dma_addr_t dma_handle)
 {
        int order = get_order(size);
        unsigned long pfn = dma_handle >> PAGE_SHIFT;
        int k;
 
-       WARN_ON(irqs_disabled());       /* for portability */
-
-       if (dma_release_from_coherent(dev, order, vaddr))
-               return;
-
-       debug_dma_free_coherent(dev, size, vaddr, dma_handle);
        for (k = 0; k < (1 << order); k++)
                __free_pages(pfn_to_page(pfn + k), 0);
+
        iounmap(vaddr);
 }
-EXPORT_SYMBOL(dma_free_coherent);
 
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                    enum dma_data_direction direction)