Enable the channel in the device_issue_pending call, so that the ordering
between cookie assignment and channel enabling is ensured naturally.
This fixes the mxs gpmi-nand breakage, which was caused by the channel
being enabled before the cookie was assigned.
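
To make the ordering concrete, here is a minimal, illustrative sketch of
the client-side slave-DMA flow this change relies on (chan, sgl and sg_len
are assumed to be set up elsewhere; this is not code from the drivers
touched below):

	/*
	 * Hypothetical client path, for illustration only.
	 * dmaengine_submit() ends up in the driver's tx_submit callback,
	 * which after this patch only assigns the cookie; the hardware is
	 * not started until dma_async_issue_pending() below.
	 */
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len,
				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	cookie = dmaengine_submit(desc);	/* cookie assigned here ... */
	if (dma_submit_error(cookie))
		return cookie;

	dma_async_issue_pending(chan);		/* ... channel enabled here */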
Suggested-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Shawn Guo <shawn.guo@linaro.org>
Tested-by: Huang Shijie <b32955@freescale.com>
Tested-by: <samgandhi9@gmail.com>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
---
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ ... @@
 static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(tx->chan);
-
-	mxs_dma_enable_chan(mxs_chan);
-
 	return dma_cookie_assign(tx);
 }
@@ ... @@
 static void mxs_dma_issue_pending(struct dma_chan *chan)
 {
-	/*
-	 * Nothing to do. We only have a single descriptor.
-	 */
+	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
+
+	mxs_dma_enable_chan(mxs_chan);
 }
 
 static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
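
For readability, the two mxs-dma callbacks as they read with the hunks
above applied (reassembled here, with explanatory comments added):

	static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
	{
		/* Only assign the cookie; the channel is enabled later. */
		return dma_cookie_assign(tx);
	}

	static void mxs_dma_issue_pending(struct dma_chan *chan)
	{
		struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);

		/* By now dmaengine_submit() has already assigned the cookie. */
		mxs_dma_enable_chan(mxs_chan);
	}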
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ ... @@
 		goto out;
 
 	dmaengine_submit(desc);
+	dma_async_issue_pending(host->dmach);
 	return;
 
 out:
@@ ... @@
 		goto out;
 
 	dmaengine_submit(desc);
+	dma_async_issue_pending(host->dmach);
 	return;
 
 out:
@@ ... @@
 		goto out;
 
 	dmaengine_submit(desc);
+	dma_async_issue_pending(host->dmach);
 	return;
 
 out:
 	dev_warn(mmc_dev(host->mmc),
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ ... @@
 	desc->callback = dma_irq_callback;
 	desc->callback_param = this;
 	dmaengine_submit(desc);
+	dma_async_issue_pending(get_dma_chan(this));
 
 	/* Wait for the interrupt from the DMA block. */
 	err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
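
Note the pattern common to every call site above: each dmaengine_submit(desc)
is now paired with a dma_async_issue_pending() on the same channel. Since
device_issue_pending is what enables the mxs-dma channel after this patch, a
submission without that follow-up call would leave the descriptor queued but
the hardware idle.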