 	tmio_mmc_enable_dma(host, true);
 }
+static void tmio_mmc_dma_callback(void *arg)
+{
+	struct tmio_mmc_host *host = arg;
+
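+	/*
+	 * dma_dataend is signalled from the DATAEND interrupt path (see the
+	 * complete() calls below), so the transfer is not finalized before
+	 * both the DMA engine and the controller have finished.
+	 */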
+	wait_for_completion(&host->dma_dataend);
+
+	spin_lock_irq(&host->lock);
+
+	if (!host->data)
+		goto out;
+
+	if (host->data->flags & MMC_DATA_READ)
+		dma_unmap_sg(host->chan_rx->device->dev,
+			     host->sg_ptr, host->sg_len,
+			     DMA_FROM_DEVICE);
+	else
+		dma_unmap_sg(host->chan_tx->device->dev,
+			     host->sg_ptr, host->sg_len,
+			     DMA_TO_DEVICE);
+
+	tmio_mmc_do_data_irq(host);
+out:
+	spin_unlock_irq(&host->lock);
+}
+
 static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 {
 	struct scatterlist *sg = host->sg_ptr, *sg_tmp;
 		DMA_DEV_TO_MEM, DMA_CTRL_ACK);
 	if (desc) {
+		reinit_completion(&host->dma_dataend);
+		desc->callback = tmio_mmc_dma_callback;
+		desc->callback_param = host;
+
 		cookie = dmaengine_submit(desc);
 		if (cookie < 0) {
 			desc = NULL;
 		DMA_MEM_TO_DEV, DMA_CTRL_ACK);
 	if (desc) {
+		reinit_completion(&host->dma_dataend);
+		desc->callback = tmio_mmc_dma_callback;
+		desc->callback_param = host;
+
 		cookie = dmaengine_submit(desc);
 		if (cookie < 0) {
 			desc = NULL;
 		dma_async_issue_pending(chan);
 }
-static void tmio_mmc_tasklet_fn(unsigned long arg)
-{
-	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
-
-	spin_lock_irq(&host->lock);
-
-	if (!host->data)
-		goto out;
-
-	if (host->data->flags & MMC_DATA_READ)
-		dma_unmap_sg(host->chan_rx->device->dev,
-			     host->sg_ptr, host->sg_len,
-			     DMA_FROM_DEVICE);
-	else
-		dma_unmap_sg(host->chan_tx->device->dev,
-			     host->sg_ptr, host->sg_len,
-			     DMA_TO_DEVICE);
-
-	tmio_mmc_do_data_irq(host);
-out:
-	spin_unlock_irq(&host->lock);
-}
-
 void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
 {
 	/* We can only either use DMA for both Tx and Rx or not use it at all */
 		if (!host->bounce_buf)
 			goto ebouncebuf;
-		tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
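+		/* Completed from the DATAEND interrupt path once the transfer is done */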
+		init_completion(&host->dma_dataend);
 		tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
 	}
 		if (done) {
 			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
-			tasklet_schedule(&host->dma_complete);
+			complete(&host->dma_dataend);
 		}
 	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
 		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
-		tasklet_schedule(&host->dma_complete);
+		complete(&host->dma_dataend);
 	} else {
 		tmio_mmc_do_data_irq(host);
 		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);