* @need_clock_update: Update the clock rate before the next request.
* @need_reset: Reset the controller before the next request.
* @mode_reg: Value of the MR register.
+ * @cfg_reg: Value of the CFG register.
* @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
* rate and timeout calculations.
* @mapbase: Physical address of the MMIO registers.
bool need_clock_update;
bool need_reset;
u32 mode_reg;
+ u32 cfg_reg;
unsigned long bus_hz;
unsigned long mapbase;
struct clk *mck;
return true;
}
+/*
+ * The new MCI2 module isn't 100% compatible with the old MCI module,
+ * and it has a few nice features which we want to use...
+ */
+static inline bool atmci_is_mci2(void)
+{
+ if (cpu_is_at91sam9g45())
+ return true;
+
+ return false;
+}
+
/*
* The debugfs stuff below is mostly optimized away when
* CONFIG_DEBUG_FS is not set.
buf[MCI_BLKR / 4],
buf[MCI_BLKR / 4] & 0xffff,
(buf[MCI_BLKR / 4] >> 16) & 0xffff);
+ if (atmci_is_mci2())
+ seq_printf(s, "CSTOR:\t0x%08x\n", buf[MCI_CSTOR / 4]);
/* Don't read RSPR and RDR; it will consume the data there */
atmci_show_status_reg(s, "SR", buf[MCI_SR / 4]);
atmci_show_status_reg(s, "IMR", buf[MCI_IMR / 4]);
+ if (atmci_is_mci2()) {
+ u32 val;
+
+ val = buf[MCI_DMA / 4];
+ seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n",
+ val, val & 3,
+ ((val >> 4) & 3) ?
+ 1 << (((val >> 4) & 3) + 1) : 1,
+ val & MCI_DMAEN ? " DMAEN" : "");
+
+ val = buf[MCI_CFG / 4];
+ seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n",
+ val,
+ val & MCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "",
+ val & MCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "",
+ val & MCI_CFG_HSMODE ? " HSMODE" : "",
+ val & MCI_CFG_LSYNC ? " LSYNC" : "");
+ }
+
kfree(buf);
return 0;
dev_vdbg(&host->pdev->dev, "DMA complete\n");
+ if (atmci_is_mci2())
+ /* Disable DMA hardware handshaking on MCI */
+ mci_writel(host, DMA, mci_readl(host, DMA) & ~MCI_DMAEN);
+
atmci_dma_cleanup(host);
/*
}
static int
-atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
+atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
{
struct dma_chan *chan;
struct dma_async_tx_descriptor *desc;
if (!chan)
return -ENODEV;
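+ /* Enable DMA hardware handshaking and set the chunk size on MCI2 */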
+ if (atmci_is_mci2())
+ mci_writel(host, DMA, MCI_DMA_CHKSIZE(3) | MCI_DMAEN);
+
if (data->flags & MMC_DATA_READ)
direction = DMA_FROM_DEVICE;
else
host->dma.data_desc = desc;
desc->callback = atmci_dma_complete;
desc->callback_param = host;
- desc->tx_submit(desc);
-
- /* Go! */
- chan->device->device_issue_pending(chan);
return 0;
unmap_exit:
return -ENOMEM;
}
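+/*
+ * Kick off the DMA transfer set up by atmci_prepare_data_dma(): submit the
+ * descriptor and ask the DMA engine to issue pending transactions.
+ */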
+static void atmci_submit_data(struct atmel_mci *host)
+{
+ struct dma_chan *chan = host->data_chan;
+ struct dma_async_tx_descriptor *desc = host->dma.data_desc;
+
+ if (chan) {
+ desc->tx_submit(desc);
+ chan->device->device_issue_pending(chan);
+ }
+}
+
#else /* CONFIG_MMC_ATMELMCI_DMA */
-static int atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
+static int atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
{
return -ENOSYS;
}
+static void atmci_submit_data(struct atmel_mci *host) {}
+
static void atmci_stop_dma(struct atmel_mci *host)
{
/* Data transfer was stopped by the interrupt handler */
* Returns a mask of interrupt flags to be enabled after the whole
* request has been prepared.
*/
-static u32 atmci_submit_data(struct atmel_mci *host, struct mmc_data *data)
+static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data)
{
u32 iflags;
host->data = data;
iflags = ATMCI_DATA_ERROR_FLAGS;
- if (atmci_submit_data_dma(host, data)) {
+ if (atmci_prepare_data_dma(host, data)) {
host->data_chan = NULL;
/*
mci_writel(host, CR, MCI_CR_SWRST);
mci_writel(host, CR, MCI_CR_MCIEN);
mci_writel(host, MR, host->mode_reg);
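+ /* Restore the CFG register after the software reset (MCI2 only) */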
+ if (atmci_is_mci2())
+ mci_writel(host, CFG, host->cfg_reg);
host->need_reset = false;
}
mci_writel(host, SDCR, slot->sdc_reg);
while (!(mci_readl(host, SR) & MCI_CMDRDY))
cpu_relax();
}
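+ /* Accumulate interrupt flags: data flags first, MCI_CMDRDY later */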
+ iflags = 0;
data = mrq->data;
if (data) {
atmci_set_timeout(host, slot, data);
| MCI_BLKLEN(data->blksz));
dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n",
MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz));
+
+ iflags |= atmci_prepare_data(host, data);
}
- iflags = MCI_CMDRDY;
+ iflags |= MCI_CMDRDY;
cmd = mrq->cmd;
cmdflags = atmci_prepare_command(slot->mmc, cmd);
atmci_start_command(host, cmd, cmdflags);
if (data)
- iflags |= atmci_submit_data(host, data);
+ atmci_submit_data(host);
if (mrq->stop) {
host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
clk_enable(host->mck);
mci_writel(host, CR, MCI_CR_SWRST);
mci_writel(host, CR, MCI_CR_MCIEN);
+ if (atmci_is_mci2())
+ mci_writel(host, CFG, host->cfg_reg);
}
/*
mci_writel(host, CR, MCI_CR_SWRST);
mci_writel(host, CR, MCI_CR_MCIEN);
mci_writel(host, MR, host->mode_reg);
+ if (atmci_is_mci2())
+ mci_writel(host, CFG, host->cfg_reg);
host->data = NULL;
host->cmd = NULL;
}
if (!host->dma.chan)
dev_notice(&host->pdev->dev, "DMA not available, using PIO\n");
+ else
+ dev_info(&host->pdev->dev,
+ "Using %s for DMA transfers\n",
+ dma_chan_name(host->dma.chan));
}
#else
static void atmci_configure_dma(struct atmel_mci *host) {}