diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o
tmio_mmc_core-y := tmio_mmc_pio.o
-tmio_mmc_core-$(subst m,y,$(CONFIG_MMC_SDHI)) += tmio_mmc_dma.o
-obj-$(CONFIG_MMC_SDHI) += sh_mobile_sdhi.o
+obj-$(CONFIG_MMC_SDHI) += sh_mobile_sdhi.o tmio_mmc_dma.o
obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
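Note the build consequence of this hunk: tmio_mmc_dma.o is now built alongside sh_mobile_sdhi.o instead of being linked into tmio_mmc_core, so with CONFIG_MMC_SDHI=m it becomes a module of its own. That is presumably why the core IRQ helpers pick up EXPORT_SYMBOL() markers in the tmio_mmc_pio.c hunks below, and why tmio_mmc_get_dma_ops() likewise needs to be exported for sh_mobile_sdhi to reach it in a modular build.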
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
/* All SDHI have SDIO status bits which must be 1 */
mmc_data->flags |= TMIO_MMC_SDIO_STATUS_SETBITS;
- ret = tmio_mmc_host_probe(host, mmc_data);
+ ret = tmio_mmc_host_probe(host, mmc_data, tmio_mmc_get_dma_ops());
if (ret < 0)
goto efree;
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
host->bus_shift = resource_size(res) >> 10;
- ret = tmio_mmc_host_probe(host, pdata);
+ ret = tmio_mmc_host_probe(host, pdata, NULL);
if (ret)
goto host_free;
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
void (*enable)(struct tmio_mmc_host *host, bool enable);
};
+struct tmio_mmc_dma_ops {
+ void (*start)(struct tmio_mmc_host *host, struct mmc_data *data);
+ void (*enable)(struct tmio_mmc_host *host, bool enable);
+ void (*request)(struct tmio_mmc_host *host,
+ struct tmio_mmc_data *pdata);
+ void (*release)(struct tmio_mmc_host *host);
+ void (*abort)(struct tmio_mmc_host *host);
+};
+
struct tmio_mmc_host {
void __iomem *ctl;
struct mmc_command *cmd;
/* Tuning values: 1 for success, 0 for failure */
DECLARE_BITMAP(taps, BITS_PER_BYTE * sizeof(long));
unsigned int tap_num;
+
+ const struct tmio_mmc_dma_ops *dma_ops;
};
struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev);
void tmio_mmc_host_free(struct tmio_mmc_host *host);
int tmio_mmc_host_probe(struct tmio_mmc_host *host,
- struct tmio_mmc_data *pdata);
+ struct tmio_mmc_data *pdata,
+ const struct tmio_mmc_dma_ops *dma_ops);
void tmio_mmc_host_remove(struct tmio_mmc_host *host);
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host);
void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i);
irqreturn_t tmio_mmc_irq(int irq, void *devid);
+#if IS_ENABLED(CONFIG_MMC_SDHI)
+const struct tmio_mmc_dma_ops *tmio_mmc_get_dma_ops(void);
+#else
+static inline const struct tmio_mmc_dma_ops *tmio_mmc_get_dma_ops(void)
+{
+ return NULL;
+}
+#endif
+
static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg,
					  unsigned long *flags, void *virt)
{
	kunmap_atomic(virt);
	local_irq_restore(*flags);
}
-#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
-void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data);
-void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable);
-void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata);
-void tmio_mmc_release_dma(struct tmio_mmc_host *host);
-void tmio_mmc_abort_dma(struct tmio_mmc_host *host);
-#else
-static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
- struct mmc_data *data)
-{
-}
-
-static inline void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
-{
-}
-
-static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
- struct tmio_mmc_data *pdata)
-{
- host->chan_tx = NULL;
- host->chan_rx = NULL;
-}
-
-static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
-{
-}
-
-static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
-{
-}
-#endif
-
#ifdef CONFIG_PM
int tmio_mmc_host_runtime_suspend(struct device *dev);
int tmio_mmc_host_runtime_resume(struct device *dev);
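For illustration only (not part of the patch): a minimal sketch, assuming the tmio_mmc.h declarations above, of how a glue driver could hook its own DMA callbacks into the new interface. Every example_* name is hypothetical; only the tmio_mmc_dma_ops fields and the three-argument tmio_mmc_host_probe() come from the patch. Passing NULL instead of an ops pointer leaves the host PIO-only, as the tmio_mmc glue does.

/* Illustration only -- "example_*" names are made up for this sketch. */
#include "tmio_mmc.h"

static void example_dma_start(struct tmio_mmc_host *host, struct mmc_data *data) { }
static void example_dma_enable(struct tmio_mmc_host *host, bool enable) { }
static void example_dma_request(struct tmio_mmc_host *host,
				struct tmio_mmc_data *pdata) { }
static void example_dma_release(struct tmio_mmc_host *host) { }
static void example_dma_abort(struct tmio_mmc_host *host) { }

static const struct tmio_mmc_dma_ops example_dma_ops = {
	.start   = example_dma_start,
	.enable  = example_dma_enable,
	.request = example_dma_request,
	.release = example_dma_release,
	.abort   = example_dma_abort,
};

/* Called from a hypothetical glue driver's probe, after tmio_mmc_host_alloc(). */
static int example_attach(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
{
	/* Pass NULL instead of &example_dma_ops to run the host PIO-only. */
	return tmio_mmc_host_probe(host, pdata, &example_dma_ops);
}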
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
#define TMIO_MMC_MIN_DMA_LEN 8
-void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
+static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
if (!host->chan_tx || !host->chan_rx)
return;
host->dma->enable(host, enable);
}
-void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
+static void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
{
tmio_mmc_enable_dma(host, false);
}
}
-void tmio_mmc_start_dma(struct tmio_mmc_host *host,
+static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
struct mmc_data *data)
{
if (data->flags & MMC_DATA_READ) {
dma_async_issue_pending(chan);
}
-void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
+static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
+ struct tmio_mmc_data *pdata)
{
/* We can only either use DMA for both Tx and Rx or not use it at all */
if (!host->dma || (!host->pdev->dev.of_node &&
host->chan_tx = NULL;
}
-void tmio_mmc_release_dma(struct tmio_mmc_host *host)
+static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
if (host->chan_tx) {
struct dma_chan *chan = host->chan_tx;
host->bounce_buf = NULL;
}
}
+
+static const struct tmio_mmc_dma_ops tmio_mmc_dma_ops = {
+ .start = tmio_mmc_start_dma,
+ .enable = tmio_mmc_enable_dma,
+ .request = tmio_mmc_request_dma,
+ .release = tmio_mmc_release_dma,
+ .abort = tmio_mmc_abort_dma,
+};
+
+const struct tmio_mmc_dma_ops *tmio_mmc_get_dma_ops(void)
+{
+	return &tmio_mmc_dma_ops;
+}
+EXPORT_SYMBOL(tmio_mmc_get_dma_ops);
#include "tmio_mmc.h"
+static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
+ struct mmc_data *data)
+{
+ if (host->dma_ops)
+ host->dma_ops->start(host, data);
+}
+
+static inline void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
+{
+ if (host->dma_ops)
+ host->dma_ops->enable(host, enable);
+}
+
+static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
+ struct tmio_mmc_data *pdata)
+{
+ if (host->dma_ops) {
+ host->dma_ops->request(host, pdata);
+ } else {
+ host->chan_tx = NULL;
+ host->chan_rx = NULL;
+ }
+}
+
+static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
+{
+ if (host->dma_ops)
+ host->dma_ops->release(host);
+}
+
+static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
+{
+ if (host->dma_ops)
+ host->dma_ops->abort(host);
+}
+
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
+EXPORT_SYMBOL(tmio_mmc_enable_mmc_irqs);
void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
+EXPORT_SYMBOL(tmio_mmc_disable_mmc_irqs);
	schedule_work(&host->done);
}
+EXPORT_SYMBOL(tmio_mmc_do_data_irq);
static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat)
{
EXPORT_SYMBOL(tmio_mmc_host_free);
int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
- struct tmio_mmc_data *pdata)
+ struct tmio_mmc_data *pdata,
+ const struct tmio_mmc_dma_ops *dma_ops)
{
struct platform_device *pdev = _host->pdev;
struct mmc_host *mmc = _host->mmc;
INIT_WORK(&_host->done, tmio_mmc_done_work);
/* See if we also get DMA */
+ _host->dma_ops = dma_ops;
tmio_mmc_request_dma(_host, pdata);
pm_runtime_set_active(&pdev->dev);