struct scatterlist *sgl = &sport->tx_sgl[0];
struct circ_buf *xmit = &sport->port.state->xmit;
unsigned long flags;
+ unsigned long temp;
spin_lock_irqsave(&sport->port.lock, flags);
dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
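+ /* TX DMA completed: clear UCR1_TDMAEN so the UART stops raising transmit DMA requests until the next transfer is set up */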
+ temp = readl(sport->port.membase + UCR1);
+ temp &= ~UCR1_TDMAEN;
+ writel(temp, sport->port.membase + UCR1);
+
/* update the stat */
xmit->tail = (xmit->tail + sport->tx_bytes) & (UART_XMIT_SIZE - 1);
sport->port.icount.tx += sport->tx_bytes;
struct dma_async_tx_descriptor *desc;
struct dma_chan *chan = sport->dma_chan_tx;
struct device *dev = sport->port.dev;
+ unsigned long temp;
int ret;
if (sport->dma_is_txing)
	return;
dev_dbg(dev, "TX: prepare to send %lu bytes by DMA.\n",
uart_circ_chars_pending(xmit));
+
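+ /* Enable the transmit DMA request (UCR1_TDMAEN) before the descriptor is submitted, so the DMA engine can service the TX FIFO */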
+ temp = readl(sport->port.membase + UCR1);
+ temp |= UCR1_TDMAEN;
+ writel(temp, sport->port.membase + UCR1);
+
/* fire it */
sport->dma_is_txing = 1;
dmaengine_submit(desc);
{
struct imx_port *sport = (struct imx_port *)port;
struct scatterlist *sgl = &sport->tx_sgl[0];
+ unsigned long temp;
if (!sport->dma_chan_tx)
return;
if (sport->dma_is_txing) {
dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents,
DMA_TO_DEVICE);
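+ /* An in-flight transfer is being cleaned up: clear UCR1_TDMAEN here as well so no further transmit DMA requests are issued */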
+ temp = readl(sport->port.membase + UCR1);
+ temp &= ~UCR1_TDMAEN;
+ writel(temp, sport->port.membase + UCR1);
sport->dma_is_txing = false;
}
}