Use the new core function mmc_get_dma_dir() to derive the DMA transfer
direction from data->flags, instead of open-coding the
MMC_DATA_WRITE/MMC_DATA_READ check in every host driver. This also
allows removing the private helpers that duplicated the same logic:
dw_mci_get_dma_dir(), jz4740_mmc_get_dma_dir(),
omap_hsmmc_get_dma_dir() and sunxi_mmc_get_dma_dir().
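
For reference, a minimal sketch of the helper this conversion relies
on; the exact location (assumed to be include/linux/mmc/host.h) and
body are inferred from the open-coded checks removed below, not part
of this patch:

    #include <linux/dma-direction.h>  /* enum dma_data_direction */
    #include <linux/mmc/core.h>       /* struct mmc_data, MMC_DATA_WRITE */

    /*
     * Presumed definition, mirroring the removed per-driver helpers:
     * a write maps buffers towards the device, anything else maps
     * from the device.
     */
    static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data)
    {
            return data->flags & MMC_DATA_WRITE ?
                    DMA_TO_DEVICE : DMA_FROM_DEVICE;
    }

Since a data transfer is always either a read or a write, callers that
previously tested MMC_DATA_READ (e.g. the jz4740 and mvsdio hunks)
resolve to the same direction as the MMC_DATA_WRITE form used above.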
Signed-off-by: Heiner Kallweit <hkallweit1@gmail.com>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
if (host->dma_in_use) {
enum dma_data_direction dma_data_dir;
- if (data->flags & MMC_DATA_WRITE)
- dma_data_dir = DMA_TO_DEVICE;
- else
- dma_data_dir = DMA_FROM_DEVICE;
+ dma_data_dir = mmc_get_dma_dir(data);
if (dma_data_dir == DMA_FROM_DEVICE) {
/*
*/
sg_len = (data->blocks == 1) ? 1 : data->sg_len;
- if (data->flags & MMC_DATA_WRITE)
- dma_data_dir = DMA_TO_DEVICE;
- else
- dma_data_dir = DMA_FROM_DEVICE;
+ dma_data_dir = mmc_get_dma_dir(data);
host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
sg_len, dma_data_dir);
if (data)
dma_unmap_sg(&host->pdev->dev,
data->sg, data->sg_len,
- ((data->flags & MMC_DATA_WRITE)
- ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
+ mmc_get_dma_dir(data));
}
/*
if (data)
dma_unmap_sg(host->dma.chan->device->dev,
data->sg, data->sg_len,
- ((data->flags & MMC_DATA_WRITE)
- ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
+ mmc_get_dma_dir(data));
}
/*
{
u32 iflags, tmp;
unsigned int sg_len;
- enum dma_data_direction dir;
int i;
data->error = -EINPROGRESS;
/* Enable pdc mode */
atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCMODE);
- if (data->flags & MMC_DATA_READ) {
- dir = DMA_FROM_DEVICE;
+ if (data->flags & MMC_DATA_READ)
iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
- } else {
- dir = DMA_TO_DEVICE;
+ else
iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE;
- }
/* Set BLKLEN */
tmp = atmci_readl(host, ATMCI_MR);
/* Configure PDC */
host->data_size = data->blocks * data->blksz;
- sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir);
+ sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
if ((!host->caps.has_rwproof)
&& (host->data->flags & MMC_DATA_WRITE)) {
}
if (host->data_size)
- atmci_pdc_set_both_buf(host,
- ((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT));
-
+ atmci_pdc_set_both_buf(host, data->flags & MMC_DATA_READ ?
+ XFER_RECEIVE : XFER_TRANSMIT);
return iflags;
}
struct dma_async_tx_descriptor *desc;
struct scatterlist *sg;
unsigned int i;
- enum dma_data_direction direction;
enum dma_transfer_direction slave_dirn;
unsigned int sglen;
u32 maxburst;
return -ENODEV;
if (data->flags & MMC_DATA_READ) {
- direction = DMA_FROM_DEVICE;
host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
maxburst = atmci_convert_chksize(host,
host->dma_conf.src_maxburst);
} else {
- direction = DMA_TO_DEVICE;
host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
maxburst = atmci_convert_chksize(host,
host->dma_conf.dst_maxburst);
ATMCI_DMAEN);
sglen = dma_map_sg(chan->device->dev, data->sg,
- data->sg_len, direction);
+ data->sg_len, mmc_get_dma_dir(data));
dmaengine_slave_config(chan, &host->dma_conf);
desc = dmaengine_prep_slave_sg(chan,
return iflags;
unmap_exit:
- dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction);
+ dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
return -ENOMEM;
}
int ret = 0;
host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- ((data->flags & MMC_DATA_WRITE)
- ? DMA_TO_DEVICE
- : DMA_FROM_DEVICE));
+ mmc_get_dma_dir(data));
/* no individual DMA segment should need a partial FIFO */
for (i = 0; i < host->sg_len; i++) {
if (sg_dma_len(data->sg + i) & mask) {
dma_unmap_sg(mmc_dev(host->mmc),
- data->sg, data->sg_len,
- (data->flags & MMC_DATA_WRITE)
- ? DMA_TO_DEVICE
- : DMA_FROM_DEVICE);
+ data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
return -1;
}
}
davinci_abort_dma(host);
dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- (data->flags & MMC_DATA_WRITE)
- ? DMA_TO_DEVICE
- : DMA_FROM_DEVICE);
+ mmc_get_dma_dir(data));
host->do_dma = false;
}
host->data_dir = DAVINCI_MMC_DATADIR_NONE;
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
-static int dw_mci_get_dma_dir(struct mmc_data *data)
-{
- if (data->flags & MMC_DATA_WRITE)
- return DMA_TO_DEVICE;
- else
- return DMA_FROM_DEVICE;
-}
-
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
struct mmc_data *data = host->data;
dma_unmap_sg(host->dev,
data->sg,
data->sg_len,
- dw_mci_get_dma_dir(data));
+ mmc_get_dma_dir(data));
data->host_cookie = COOKIE_UNMAPPED;
}
}
sg_len = dma_map_sg(host->dev,
data->sg,
data->sg_len,
- dw_mci_get_dma_dir(data));
+ mmc_get_dma_dir(data));
if (sg_len == 0)
return -EINVAL;
dma_unmap_sg(slot->host->dev,
data->sg,
data->sg_len,
- dw_mci_get_dma_dir(data));
+ mmc_get_dma_dir(data));
data->host_cookie = COOKIE_UNMAPPED;
}
return -ENODEV;
}
-static inline int jz4740_mmc_get_dma_dir(struct mmc_data *data)
-{
- return (data->flags & MMC_DATA_READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-}
-
static inline struct dma_chan *jz4740_mmc_get_dma_chan(struct jz4740_mmc_host *host,
struct mmc_data *data)
{
struct mmc_data *data)
{
struct dma_chan *chan = jz4740_mmc_get_dma_chan(host, data);
- enum dma_data_direction dir = jz4740_mmc_get_dma_dir(data);
+ enum dma_data_direction dir = mmc_get_dma_dir(data);
dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
}
struct dma_chan *chan)
{
struct jz4740_mmc_host_next *next_data = &host->next_data;
- enum dma_data_direction dir = jz4740_mmc_get_dma_dir(data);
+ enum dma_data_direction dir = mmc_get_dma_dir(data);
int sg_len;
if (!next && data->host_cookie &&
u32 clock_rate;
unsigned long timeout;
- if (data->flags & MMC_DATA_READ)
- direction = DMA_FROM_DEVICE;
- else
- direction = DMA_TO_DEVICE;
+ direction = mmc_get_dma_dir(data);
mmc_spi_setup_data_message(host, multiple, direction);
t = &host->t;
static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
struct dma_chan *chan;
- enum dma_data_direction dir;
- if (data->flags & MMC_DATA_READ) {
- dir = DMA_FROM_DEVICE;
+ if (data->flags & MMC_DATA_READ)
chan = host->dma_rx_channel;
- } else {
- dir = DMA_TO_DEVICE;
+ else
chan = host->dma_tx_channel;
- }
- dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+ dma_unmap_sg(chan->device->dev, data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
}
static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
struct dma_chan *chan;
struct dma_device *device;
struct dma_async_tx_descriptor *desc;
- enum dma_data_direction buffer_dirn;
int nr_sg;
unsigned long flags = DMA_CTRL_ACK;
if (data->flags & MMC_DATA_READ) {
conf.direction = DMA_DEV_TO_MEM;
- buffer_dirn = DMA_FROM_DEVICE;
chan = host->dma_rx_channel;
} else {
conf.direction = DMA_MEM_TO_DEV;
- buffer_dirn = DMA_TO_DEVICE;
chan = host->dma_tx_channel;
}
return -EINVAL;
device = chan->device;
- nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
+ nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
if (nr_sg == 0)
return -EINVAL;
return 0;
unmap_exit:
- dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
+ dma_unmap_sg(device->dev, data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
return -ENOMEM;
}
static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host)
{
- u32 len, dir_data, dir_slave;
+ u32 len, dir_slave;
long dma_time;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *dma_chan;
if (data->flags & MMC_DATA_WRITE) {
dma_chan = host->dma_chan_tx;
- dir_data = DMA_TO_DEVICE;
dir_slave = DMA_MEM_TO_DEV;
} else {
dma_chan = host->dma_chan_rx;
- dir_data = DMA_FROM_DEVICE;
dir_slave = DMA_DEV_TO_MEM;
}
len = dma_map_sg(dma_chan->device->dev, data->sg,
- data->sg_len, dir_data);
+ data->sg_len, mmc_get_dma_dir(data));
if (len > 0) {
desc = dmaengine_prep_slave_sg(dma_chan, data->sg,
dma_unmap_sg(dma_chan->device->dev,
data->sg, data->sg_len,
- dir_data);
+ mmc_get_dma_dir(data));
}
struct mmc_data *data = mrq->data;
if (!(data->host_cookie & MSDC_PREPARE_FLAG)) {
- bool read = (data->flags & MMC_DATA_READ) != 0;
-
data->host_cookie |= MSDC_PREPARE_FLAG;
data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len,
- read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ mmc_get_dma_dir(data));
}
}
return;
if (data->host_cookie & MSDC_PREPARE_FLAG) {
- bool read = (data->flags & MMC_DATA_READ) != 0;
-
dma_unmap_sg(host->dev, data->sg, data->sg_len,
- read ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ mmc_get_dma_dir(data));
data->host_cookie &= ~MSDC_PREPARE_FLAG;
}
}
return 1;
} else {
dma_addr_t phys_addr;
- int dma_dir = (data->flags & MMC_DATA_READ) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE;
- host->sg_frags = dma_map_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len, dma_dir);
+
+ host->sg_frags = dma_map_sg(mmc_dev(host->mmc),
+ data->sg, data->sg_len,
+ mmc_get_dma_dir(data));
phys_addr = sg_dma_address(data->sg);
mvsd_write(MVSD_SYS_ADDR_LOW, (u32)phys_addr & 0xffff);
mvsd_write(MVSD_SYS_ADDR_HI, (u32)phys_addr >> 16);
host->pio_size = 0;
} else {
dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_frags,
- (data->flags & MMC_DATA_READ) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ mmc_get_dma_dir(data));
}
if (err_status & MVSD_ERR_DATA_TIMEOUT)
OMAP_HSMMC_WRITE(host->base, CMD, cmdreg);
}
-static int
-omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
-{
- if (data->flags & MMC_DATA_WRITE)
- return DMA_TO_DEVICE;
- else
- return DMA_FROM_DEVICE;
-}
-
static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host,
struct mmc_data *data)
{
dmaengine_terminate_all(chan);
dma_unmap_sg(chan->device->dev,
host->data->sg, host->data->sg_len,
- omap_hsmmc_get_dma_dir(host, host->data));
+ mmc_get_dma_dir(host->data));
host->data->host_cookie = 0;
}
if (!data->host_cookie)
dma_unmap_sg(chan->device->dev,
data->sg, data->sg_len,
- omap_hsmmc_get_dma_dir(host, data));
+ mmc_get_dma_dir(data));
req_in_progress = host->req_in_progress;
host->dma_ch = -1;
/* Check if next job is already prepared */
if (next || data->host_cookie != host->next_data.cookie) {
dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
- omap_hsmmc_get_dma_dir(host, data));
+ mmc_get_dma_dir(data));
} else {
dma_len = host->next_data.dma_len;
struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data);
dma_unmap_sg(c->device->dev, data->sg, data->sg_len,
- omap_hsmmc_get_dma_dir(host, data));
+ mmc_get_dma_dir(data));
data->host_cookie = 0;
}
}
conf.direction = DMA_MEM_TO_DEV;
dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ mmc_get_dma_dir(data));
dmaengine_slave_config(host->dma, &conf);
desc = dmaengine_prep_slave_sg(host->dma, data->sg, data->sg_len,
unmap_exit:
dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ mmc_get_dma_dir(data));
return -ENOMEM;
}
return data->sg_count;
sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- data->flags & MMC_DATA_WRITE ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ mmc_get_dma_dir(data));
if (sg_count == 0)
return -ENOSPC;
if (data->host_cookie != COOKIE_UNMAPPED)
dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- data->flags & MMC_DATA_WRITE ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ mmc_get_dma_dir(data));
data->host_cookie = COOKIE_UNMAPPED;
}
if (data && data->host_cookie == COOKIE_MAPPED) {
dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- (data->flags & MMC_DATA_READ) ?
- DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ mmc_get_dma_dir(data));
data->host_cookie = COOKIE_UNMAPPED;
}
}
wmb();
}
-static enum dma_data_direction sunxi_mmc_get_dma_dir(struct mmc_data *data)
-{
- if (data->flags & MMC_DATA_WRITE)
- return DMA_TO_DEVICE;
- else
- return DMA_FROM_DEVICE;
-}
-
static int sunxi_mmc_map_dma(struct sunxi_mmc_host *host,
struct mmc_data *data)
{
struct scatterlist *sg;
dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- sunxi_mmc_get_dma_dir(data));
+ mmc_get_dma_dir(data));
if (dma_len == 0) {
dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n");
return -ENOMEM;
rval |= SDXC_FIFO_RESET;
mmc_writel(host, REG_GCTRL, rval);
dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- sunxi_mmc_get_dma_dir(data));
+ mmc_get_dma_dir(data));
}
mmc_writel(host, REG_RINTR, 0xffff);
if (data)
dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
- sunxi_mmc_get_dma_dir(data));
+ mmc_get_dma_dir(data));
dev_err(mmc_dev(mmc), "request already pending\n");
mrq->cmd->error = -EBUSY;