dmaengine: shdma: extend .device_terminate_all() to record partial transfer
author Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Thu, 18 Feb 2010 16:30:02 +0000 (16:30 +0000)
committer Paul Mundt <lethal@linux-sh.org>
Tue, 2 Mar 2010 02:12:03 +0000 (11:12 +0900)
This patch extends the .device_terminate_all() method of the shdma driver
to record the number of bytes transferred by the current descriptor, so that
the partial transfer count is available after a transfer is aborted.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
arch/sh/include/asm/dmaengine.h
drivers/dma/shdma.c
drivers/dma/shdma.h
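
Moving struct sh_desc into arch/sh/include/asm/dmaengine.h (below) exposes
the driver's per-descriptor state to DMA clients, so a slave driver can read
back the new "partial" field after aborting a transfer. A minimal client-side
sketch, not part of this patch -- the helper name is hypothetical, while
device_terminate_all() and container_of() are the real interfaces of this
kernel generation:

	#include <linux/kernel.h>
	#include <linux/dmaengine.h>
	#include <asm/dmaengine.h>

	/* Halt a channel and report how many bytes the descriptor that
	 * was in flight actually moved before the abort. */
	static size_t sh_dma_abort_get_partial(struct dma_chan *chan,
					       struct dma_async_tx_descriptor *tx)
	{
		/* struct sh_desc embeds the generic descriptor, so
		 * container_of() recovers the driver-private state. */
		struct sh_desc *desc = container_of(tx, struct sh_desc,
						    async_tx);

		/* sh_dmae_terminate_all() fills in desc->partial */
		chan->device->device_terminate_all(chan);

		return desc->partial;
	}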

arch/sh/include/asm/dmaengine.h
index 9586e4a482b1360e0d74e2d3462e452debeea7c9..bf2f30cf0a2773c391ba4314ddc925fb8ea39bb8 100644 (file)
@@ -10,6 +10,9 @@
 #ifndef ASM_DMAENGINE_H
 #define ASM_DMAENGINE_H
 
+#include <linux/dmaengine.h>
+#include <linux/list.h>
+
 #include <asm/dma-register.h>
 
 #define SH_DMAC_MAX_CHANNELS   6
@@ -70,4 +73,21 @@ struct sh_dmae_slave {
        struct sh_dmae_slave_config     *config;  /* Set by the driver */
 };
 
+struct sh_dmae_regs {
+       u32 sar; /* SAR / source address */
+       u32 dar; /* DAR / destination address */
+       u32 tcr; /* TCR / transfer count */
+};
+
+struct sh_desc {
+       struct sh_dmae_regs hw;
+       struct list_head node;
+       struct dma_async_tx_descriptor async_tx;
+       enum dma_data_direction direction;
+       dma_cookie_t cookie;
+       size_t partial;
+       int chunks;
+       int mark;
+};
+
 #endif
drivers/dma/shdma.c
index ea6779f3e73fada8edb714d0dc8877d00f33c634..5d17e09cb625412beaf0e8275f39142b7006a4c1 100644 (file)
@@ -587,6 +587,19 @@ static void sh_dmae_terminate_all(struct dma_chan *chan)
        if (!chan)
                return;
 
+       dmae_halt(sh_chan);
+
+       spin_lock_bh(&sh_chan->desc_lock);
+       if (!list_empty(&sh_chan->ld_queue)) {
+               /* Record partial transfer */
+               struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
+                                                 struct sh_desc, node);
+               desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
+                       sh_chan->xmit_shift;
+
+       }
+       spin_unlock_bh(&sh_chan->desc_lock);
+
        sh_dmae_chan_ld_cleanup(sh_chan, true);
 }
 
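
The partial count above is the difference between the transfer count
originally programmed into the descriptor (desc->hw.tcr) and the value still
left in the TCR register when the channel is halted. TCR counts transfer
units, not bytes, so the result is shifted left by sh_chan->xmit_shift -- the
log2 of the channel's per-transfer size -- to convert it to a byte count. A
worked example with hypothetical values:

	/* Illustrative only; the numbers are made up */
	u32 tcr_programmed = 0x1000; /* units written to TCR at start */
	u32 tcr_remaining  = 0x0400; /* units still pending when halted */
	int xmit_shift     = 2;      /* log2(4) for a 4-byte unit */

	size_t partial = (size_t)(tcr_programmed - tcr_remaining)
			 << xmit_shift;
	/* (0x1000 - 0x0400) = 0xc00 units; 0xc00 << 2 = 0x3000 bytes */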
@@ -701,6 +714,9 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
        /* Find the first not transferred descriptor */
        list_for_each_entry(desc, &sh_chan->ld_queue, node)
                if (desc->mark == DESC_SUBMITTED) {
+                       dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
+                               desc->async_tx.cookie, sh_chan->id,
+                               desc->hw.tcr, desc->hw.sar, desc->hw.dar);
                        /* Get the ld start address from ld_queue */
                        dmae_set_reg(sh_chan, &desc->hw);
                        dmae_start(sh_chan);
drivers/dma/shdma.h
index 9f0897f7fe34a336fe54df469d86e59a6a1b6cd3..153609a1e96c55092f2eb99911552eee7b08e802 100644 (file)
 
 #define SH_DMA_TCR_MAX 0x00FFFFFF      /* 16MB */
 
-struct sh_dmae_regs {
-       u32 sar; /* SAR / source address */
-       u32 dar; /* DAR / destination address */
-       u32 tcr; /* TCR / transfer count */
-};
-
-struct sh_desc {
-       struct sh_dmae_regs hw;
-       struct list_head node;
-       struct dma_async_tx_descriptor async_tx;
-       enum dma_data_direction direction;
-       dma_cookie_t cookie;
-       int chunks;
-       int mark;
-};
-
 struct device;
 
 struct sh_dmae_chan {