dmaengine: prepare for generic 'unmap' data
author: Dan Williams <dan.j.williams@intel.com>
Fri, 18 Oct 2013 17:35:23 +0000 (19:35 +0200)
committer: Dan Williams <dan.j.williams@intel.com>
Thu, 14 Nov 2013 00:25:06 +0000 (16:25 -0800)
Add a hook for a common dma unmap implementation to enable removal of
the per driver custom unmap code.  (A reworked version of Bartlomiej
Zolnierkiewicz's patches to remove the custom callbacks and the size
increase of dma_async_tx_descriptor for drivers that don't care about
raid).

Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Dave Jiang <dave.jiang@intel.com>
[bzolnier: prepare pl330 driver for adding missing unmap while at it]
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
15 files changed:
drivers/dma/amba-pl08x.c
drivers/dma/at_hdmac.c
drivers/dma/dw/core.c
drivers/dma/ep93xx_dma.c
drivers/dma/fsldma.c
drivers/dma/ioat/dma.c
drivers/dma/ioat/dma_v2.c
drivers/dma/ioat/dma_v3.c
drivers/dma/iop-adma.c
drivers/dma/mv_xor.c
drivers/dma/pl330.c
drivers/dma/ppc4xx/adma.c
drivers/dma/timb_dma.c
drivers/dma/txx9dmac.c
include/linux/dmaengine.h

index fce46c5bf1c74e3d76accde7570ffa2d423eb9f1..7f9846464b773dda6dbc9619dc8f48ac06531047 100644 (file)
@@ -1197,6 +1197,7 @@ static void pl08x_desc_free(struct virt_dma_desc *vd)
        struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
        struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
 
+       dma_descriptor_unmap(txd);
        if (!plchan->slave)
                pl08x_unmap_buffers(txd);
 
index c787f38a186a008a6cf8fa4af1dc9d19cab8f836..cc7098ddf9d4dc326cf6d0b02ea307fde631f4e0 100644 (file)
@@ -345,6 +345,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
        list_move(&desc->desc_node, &atchan->free_list);
 
        /* unmap dma addresses (not on slave channels) */
+       dma_descriptor_unmap(txd);
        if (!atchan->chan_common.private) {
                struct device *parent = chan2parent(&atchan->chan_common);
                if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
index 89eb89f222846e0ff5d20cfc5e14619fc05d6600..e3fe1b1a73b1329e294e83156bd2bf2ef6823066 100644 (file)
@@ -311,6 +311,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
        list_splice_init(&desc->tx_list, &dwc->free_list);
        list_move(&desc->desc_node, &dwc->free_list);
 
+       dma_descriptor_unmap(txd);
        if (!is_slave_direction(dwc->direction)) {
                struct device *parent = chan2parent(&dwc->chan);
                if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
index 591cd8c63abbcb081a4cd2ca264ed118f7f3d782..dcd6bf5d3091e2103c812c09642bbed102c82587 100644 (file)
@@ -791,6 +791,7 @@ static void ep93xx_dma_tasklet(unsigned long data)
                 * For the memcpy channels the API requires us to unmap the
                 * buffers unless requested otherwise.
                 */
+               dma_descriptor_unmap(&desc->txd);
                if (!edmac->chan.private)
                        ep93xx_dma_unmap_buffers(desc);
 
index b3f3e90054f2ab956e8019dd8abe539b85fa0301..66c4052a1f34593fd39239ab3e7f48590267cd1f 100644 (file)
@@ -868,6 +868,7 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
        /* Run any dependencies */
        dma_run_dependencies(txd);
 
+       dma_descriptor_unmap(txd);
        /* Unmap the dst buffer, if requested */
        if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
                if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
index 5ff6fc1819dc6a2e90c035956b23e23c56f9bb5d..26f8cfd6bc3f02b28bdc4d3a006b534a06c142f6 100644 (file)
@@ -602,6 +602,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
                dump_desc_dbg(ioat, desc);
                if (tx->cookie) {
                        dma_cookie_complete(tx);
+                       dma_descriptor_unmap(tx);
                        ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
                        ioat->active -= desc->hw->tx_cnt;
                        if (tx->callback) {
index b925e1b1d139bddbc6edf86f34d4b943ebfb086c..fc7b50a813ccafe917811f5dce07b0cb9f7829dd 100644 (file)
@@ -148,6 +148,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
                tx = &desc->txd;
                dump_desc_dbg(ioat, desc);
                if (tx->cookie) {
+                       dma_descriptor_unmap(tx);
                        ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
                        dma_cookie_complete(tx);
                        if (tx->callback) {
index d8ececaf1b57082cc5709aca68714542657b5566..57a2901b917ab0f46e826c429e8f91ec6bd335ff 100644 (file)
@@ -577,6 +577,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
                tx = &desc->txd;
                if (tx->cookie) {
                        dma_cookie_complete(tx);
+                       dma_descriptor_unmap(tx);
                        ioat3_dma_unmap(ioat, desc, idx + i);
                        if (tx->callback) {
                                tx->callback(tx->callback_param);
index dd8b44a56e5d0f7090b8dd65bce87a73a60c90ef..8f6e426590eb71eaae41a73fb5e20216d49e42a7 100644 (file)
@@ -152,6 +152,7 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
                if (tx->callback)
                        tx->callback(tx->callback_param);
 
+               dma_descriptor_unmap(tx);
                /* unmap dma addresses
                 * (unmap_single vs unmap_page?)
                 */
index 536dcb8ba5fdfe69ed5f726fc6b5897f00266698..ed1ab1d0875edc0333f60d32a6b6a414b4487cd5 100644 (file)
@@ -278,6 +278,7 @@ mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
                        desc->async_tx.callback(
                                desc->async_tx.callback_param);
 
+               dma_descriptor_unmap(&desc->async_tx);
                /* unmap dma addresses
                 * (unmap_single vs unmap_page?)
                 */
index a562d24d20bf55179436d16086ca90f63d1b1894..ab25e52cd43b888d3cabd5438d6d15c2289541eb 100644 (file)
@@ -2268,6 +2268,8 @@ static void pl330_tasklet(unsigned long data)
                        list_move_tail(&desc->node, &pch->dmac->desc_pool);
                }
 
+               dma_descriptor_unmap(&desc->txd);
+
                if (callback) {
                        spin_unlock_irqrestore(&pch->lock, flags);
                        callback(callback_param);
index 370ff8265630cf05cdb64a571e9acb7d9064e08e..442492da74159518930aecfb9d8b34cb957bb319 100644 (file)
@@ -1765,6 +1765,7 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
                        desc->async_tx.callback(
                                desc->async_tx.callback_param);
 
+               dma_descriptor_unmap(&desc->async_tx);
                /* unmap dma addresses
                 * (unmap_single vs unmap_page?)
                 *
index 28af214fce049db85fc02fb903a748fdbef6e0a1..1d0c98839087b97b1cd73792fb532f65071ae78d 100644 (file)
@@ -293,6 +293,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
 
        list_move(&td_desc->desc_node, &td_chan->free_list);
 
+       dma_descriptor_unmap(txd);
        if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP))
                __td_unmap_descs(td_desc,
                        txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE);
index 71e8e775189e0df5568d474ea00157a2675f9260..22a0b6c78c774cbb729809022207f1e36febcbca 100644 (file)
@@ -419,6 +419,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
        list_splice_init(&desc->tx_list, &dc->free_list);
        list_move(&desc->desc_node, &dc->free_list);
 
+       dma_descriptor_unmap(txd);
        if (!ds) {
                dma_addr_t dmaaddr;
                if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
index 0bc727534108d5a2d5d527e75eaa8020a3ccd239..9070050fbcd86a4a34f1c60552c9907d566ae32c 100644 (file)
@@ -413,6 +413,17 @@ void dma_chan_cleanup(struct kref *kref);
 typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
 
 typedef void (*dma_async_tx_callback)(void *dma_async_param);
+
+struct dmaengine_unmap_data {
+       u8 to_cnt;
+       u8 from_cnt;
+       u8 bidi_cnt;
+       struct device *dev;
+       struct kref kref;
+       size_t len;
+       dma_addr_t addr[0];
+};
+
 /**
  * struct dma_async_tx_descriptor - async transaction descriptor
  * ---dma generic offload fields---
@@ -438,6 +449,7 @@ struct dma_async_tx_descriptor {
        dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
        dma_async_tx_callback callback;
        void *callback_param;
+       struct dmaengine_unmap_data *unmap;
 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
        struct dma_async_tx_descriptor *next;
        struct dma_async_tx_descriptor *parent;
@@ -445,6 +457,20 @@ struct dma_async_tx_descriptor {
 #endif
 };
 
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+                                struct dmaengine_unmap_data *unmap)
+{
+       kref_get(&unmap->kref);
+       tx->unmap = unmap;
+}
+
+static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
+{
+       if (tx->unmap) {
+               tx->unmap = NULL;
+       }
+}
+
 #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 static inline void txd_lock(struct dma_async_tx_descriptor *txd)
 {