#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/smc.h>
#include "dw_mmc.h"
#include "dw_mmc-pltfm.h"
#include "dw_mmc-exynos.h"
+extern int cal_pll_mmc_set_ssc(unsigned int mfr, unsigned int mrr, unsigned int ssc_on);
+extern int cal_pll_mmc_check(void);
+
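+/*
+ * Dump the Exynos-specific SMU/FMP protection registers (EMMCP/MPS*) and the
+ * HS400 tuning registers; each value is also cached in host->sfr_dump. The
+ * register offsets are presumably defined in dw_mmc-exynos.h, which is not
+ * part of this hunk.
+ */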
+static void dw_mci_exynos_register_dump(struct dw_mci *host)
+{
+ dev_err(host->dev, ": EMMCP_BASE: 0x%08x\n",
+ host->sfr_dump->fmp_emmcp_base = mci_readl(host, EMMCP_BASE));
+ dev_err(host->dev, ": MPSECURITY: 0x%08x\n",
+ host->sfr_dump->mpsecurity = mci_readl(host, MPSECURITY));
+ dev_err(host->dev, ": MPSTAT: 0x%08x\n",
+ host->sfr_dump->mpstat = mci_readl(host, MPSTAT));
+ dev_err(host->dev, ": MPSBEGIN: 0x%08x\n",
+ host->sfr_dump->mpsbegin = mci_readl(host, MPSBEGIN0));
+ dev_err(host->dev, ": MPSEND: 0x%08x\n",
+ host->sfr_dump->mpsend = mci_readl(host, MPSEND0));
+ dev_err(host->dev, ": MPSCTRL: 0x%08x\n",
+ host->sfr_dump->mpsctrl = mci_readl(host, MPSCTRL0));
+ dev_err(host->dev, ": HS400_DQS_EN: 0x%08x\n",
+ host->sfr_dump->hs400_rdqs_en = mci_readl(host, HS400_DQS_EN));
+ dev_err(host->dev, ": HS400_ASYNC_FIFO_CTRL: 0x%08x\n",
+ host->sfr_dump->hs400_acync_fifo_ctrl = mci_readl(host, HS400_ASYNC_FIFO_CTRL));
+ dev_err(host->dev, ": HS400_DLINE_CTRL: 0x%08x\n",
+ host->sfr_dump->hs400_dline_ctrl = mci_readl(host, HS400_DLINE_CTRL));
+}
+
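+/*
+ * Dump every host SFR on error. Values read here are mirrored into
+ * host->sfr_dump so they survive for post-mortem (e.g. ramdump) analysis.
+ */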
+void dw_mci_reg_dump(struct dw_mci *host)
+{
+ u32 reg;
+
+ dev_err(host->dev, ": ============== REGISTER DUMP ==============\n");
+ dev_err(host->dev, ": CTRL: 0x%08x\n", host->sfr_dump->contrl = mci_readl(host, CTRL));
+ dev_err(host->dev, ": PWREN: 0x%08x\n", host->sfr_dump->pwren = mci_readl(host, PWREN));
+ dev_err(host->dev, ": CLKDIV: 0x%08x\n",
+ host->sfr_dump->clkdiv = mci_readl(host, CLKDIV));
+ dev_err(host->dev, ": CLKSRC: 0x%08x\n",
+ host->sfr_dump->clksrc = mci_readl(host, CLKSRC));
+ dev_err(host->dev, ": CLKENA: 0x%08x\n",
+ host->sfr_dump->clkena = mci_readl(host, CLKENA));
+ dev_err(host->dev, ": TMOUT: 0x%08x\n", host->sfr_dump->tmout = mci_readl(host, TMOUT));
+ dev_err(host->dev, ": CTYPE: 0x%08x\n", host->sfr_dump->ctype = mci_readl(host, CTYPE));
+ dev_err(host->dev, ": BLKSIZ: 0x%08x\n",
+ host->sfr_dump->blksiz = mci_readl(host, BLKSIZ));
+ dev_err(host->dev, ": BYTCNT: 0x%08x\n",
+ host->sfr_dump->bytcnt = mci_readl(host, BYTCNT));
+ dev_err(host->dev, ": INTMSK: 0x%08x\n",
+ host->sfr_dump->intmask = mci_readl(host, INTMASK));
+ dev_err(host->dev, ": CMDARG: 0x%08x\n",
+ host->sfr_dump->cmdarg = mci_readl(host, CMDARG));
+ dev_err(host->dev, ": CMD: 0x%08x\n", host->sfr_dump->cmd = mci_readl(host, CMD));
+ dev_err(host->dev, ": RESP0: 0x%08x\n", mci_readl(host, RESP0));
+ dev_err(host->dev, ": RESP1: 0x%08x\n", mci_readl(host, RESP1));
+ dev_err(host->dev, ": RESP2: 0x%08x\n", mci_readl(host, RESP2));
+ dev_err(host->dev, ": RESP3: 0x%08x\n", mci_readl(host, RESP3));
+ dev_err(host->dev, ": MINTSTS: 0x%08x\n",
+ host->sfr_dump->mintsts = mci_readl(host, MINTSTS));
+ dev_err(host->dev, ": RINTSTS: 0x%08x\n",
+ host->sfr_dump->rintsts = mci_readl(host, RINTSTS));
+ dev_err(host->dev, ": STATUS: 0x%08x\n",
+ host->sfr_dump->status = mci_readl(host, STATUS));
+ dev_err(host->dev, ": FIFOTH: 0x%08x\n",
+ host->sfr_dump->fifoth = mci_readl(host, FIFOTH));
+ dev_err(host->dev, ": CDETECT: 0x%08x\n", mci_readl(host, CDETECT));
+ dev_err(host->dev, ": WRTPRT: 0x%08x\n", mci_readl(host, WRTPRT));
+ dev_err(host->dev, ": GPIO: 0x%08x\n", mci_readl(host, GPIO));
+ dev_err(host->dev, ": TCBCNT: 0x%08x\n",
+ host->sfr_dump->tcbcnt = mci_readl(host, TCBCNT));
+ dev_err(host->dev, ": TBBCNT: 0x%08x\n",
+ host->sfr_dump->tbbcnt = mci_readl(host, TBBCNT));
+ dev_err(host->dev, ": DEBNCE: 0x%08x\n", mci_readl(host, DEBNCE));
+ dev_err(host->dev, ": USRID: 0x%08x\n", mci_readl(host, USRID));
+ dev_err(host->dev, ": VERID: 0x%08x\n", mci_readl(host, VERID));
+ dev_err(host->dev, ": HCON: 0x%08x\n", mci_readl(host, HCON));
+ dev_err(host->dev, ": UHS_REG: 0x%08x\n",
+ host->sfr_dump->uhs_reg = mci_readl(host, UHS_REG));
+ dev_err(host->dev, ": BMOD: 0x%08x\n", host->sfr_dump->bmod = mci_readl(host, BMOD));
+ dev_err(host->dev, ": PLDMND: 0x%08x\n", mci_readl(host, PLDMND));
+ dev_err(host->dev, ": DBADDRL: 0x%08x\n",
+ host->sfr_dump->dbaddrl = mci_readl(host, DBADDRL));
+ dev_err(host->dev, ": DBADDRU: 0x%08x\n",
+ host->sfr_dump->dbaddru = mci_readl(host, DBADDRU));
+ dev_err(host->dev, ": DSCADDRL: 0x%08x\n",
+ host->sfr_dump->dscaddrl = mci_readl(host, DSCADDRL));
+ dev_err(host->dev, ": DSCADDRU: 0x%08x\n",
+ host->sfr_dump->dscaddru = mci_readl(host, DSCADDRU));
+ dev_err(host->dev, ": BUFADDR: 0x%08x\n",
+ host->sfr_dump->bufaddr = mci_readl(host, BUFADDR));
+ dev_err(host->dev, ": BUFADDRU: 0x%08x\n",
+ host->sfr_dump->bufaddru = mci_readl(host, BUFADDRU));
+ dev_err(host->dev, ": DBADDR: 0x%08x\n",
+ host->sfr_dump->dbaddr = mci_readl(host, DBADDR));
+ dev_err(host->dev, ": DSCADDR: 0x%08x\n",
+ host->sfr_dump->dscaddr = mci_readl(host, DSCADDR));
+ dev_err(host->dev, ": BUFADDR: 0x%08x\n",
+ host->sfr_dump->bufaddr = mci_readl(host, BUFADDR));
+ dev_err(host->dev, ": CLKSEL: 0x%08x\n",
+ host->sfr_dump->clksel = mci_readl(host, CLKSEL));
+ dev_err(host->dev, ": IDSTS: 0x%08x\n", mci_readl(host, IDSTS));
+ dev_err(host->dev, ": IDSTS64: 0x%08x\n",
+ host->sfr_dump->idsts64 = mci_readl(host, IDSTS64));
+ dev_err(host->dev, ": IDINTEN: 0x%08x\n", mci_readl(host, IDINTEN));
+ dev_err(host->dev, ": IDINTEN64: 0x%08x\n",
+ host->sfr_dump->idinten64 = mci_readl(host, IDINTEN64));
+ dev_err(host->dev, ": RESP_TAT: 0x%08x\n", mci_readl(host, RESP_TAT));
+ dev_err(host->dev, ": FORCE_CLK_STOP: 0x%08x\n",
+ host->sfr_dump->force_clk_stop = mci_readl(host, FORCE_CLK_STOP));
+ dev_err(host->dev, ": CDTHRCTL: 0x%08x\n", mci_readl(host, CDTHRCTL));
+ dw_mci_exynos_register_dump(host);
+ dev_err(host->dev, ": ============== STATUS DUMP ================\n");
+ dev_err(host->dev, ": cmd_status: 0x%08x\n",
+ host->sfr_dump->cmd_status = host->cmd_status);
+ dev_err(host->dev, ": data_status: 0x%08x\n",
+		host->sfr_dump->data_status = host->data_status);
+ dev_err(host->dev, ": pending_events: 0x%08lx\n",
+ host->sfr_dump->pending_events = host->pending_events);
+ dev_err(host->dev, ": completed_events:0x%08lx\n",
+ host->sfr_dump->completed_events = host->completed_events);
+ dev_err(host->dev, ": state: %d\n", host->sfr_dump->host_state = host->state);
+ dev_err(host->dev, ": gate-clk: %s\n",
+ atomic_read(&host->ciu_clk_cnt) ? "enable" : "disable");
+ dev_err(host->dev, ": ciu_en_win: %d\n", atomic_read(&host->ciu_en_win));
+ reg = mci_readl(host, CMD);
+ dev_err(host->dev, ": ================= CMD REG =================\n");
+ if ((reg >> 9) & 0x1) {
+ dev_err(host->dev, ": read/write : %s\n",
+ (reg & (0x1 << 10)) ? "write" : "read");
+ dev_err(host->dev, ": data expected : %d\n", (reg >> 9) & 0x1);
+ }
+ dev_err(host->dev, ": cmd index : %d\n",
+ host->sfr_dump->cmd_index = ((reg >> 0) & 0x3f));
+ reg = mci_readl(host, STATUS);
+ dev_err(host->dev, ": ================ STATUS REG ===============\n");
+ dev_err(host->dev, ": fifocount : %d\n",
+ host->sfr_dump->fifo_count = ((reg >> 17) & 0x1fff));
+ dev_err(host->dev, ": response index : %d\n", (reg >> 11) & 0x3f);
+ dev_err(host->dev, ": data state mc busy: %d\n", (reg >> 10) & 0x1);
+ dev_err(host->dev, ": data busy : %d\n",
+ host->sfr_dump->data_busy = ((reg >> 9) & 0x1));
+ dev_err(host->dev, ": data 3 state : %d\n",
+ host->sfr_dump->data_3_state = ((reg >> 8) & 0x1));
+ dev_err(host->dev, ": command fsm state : %d\n", (reg >> 4) & 0xf);
+ dev_err(host->dev, ": fifo full : %d\n", (reg >> 3) & 0x1);
+ dev_err(host->dev, ": fifo empty : %d\n", (reg >> 2) & 0x1);
+ dev_err(host->dev, ": fifo tx watermark : %d\n",
+ host->sfr_dump->fifo_tx_watermark = ((reg >> 1) & 0x1));
+ dev_err(host->dev, ": fifo rx watermark : %d\n",
+ host->sfr_dump->fifo_rx_watermark = ((reg >> 0) & 0x1));
+ dev_err(host->dev, ": ===========================================\n");
+}
+
/* Variations in Exynos specific dw-mshc controller */
enum dw_mci_exynos_type {
- DW_MCI_TYPE_EXYNOS4210,
- DW_MCI_TYPE_EXYNOS4412,
- DW_MCI_TYPE_EXYNOS5250,
- DW_MCI_TYPE_EXYNOS5420,
- DW_MCI_TYPE_EXYNOS5420_SMU,
- DW_MCI_TYPE_EXYNOS7,
- DW_MCI_TYPE_EXYNOS7_SMU,
+ DW_MCI_TYPE_EXYNOS,
};
static struct dw_mci_exynos_compatible {
- char *compatible;
- enum dw_mci_exynos_type ctrl_type;
+ char *compatible;
+ enum dw_mci_exynos_type ctrl_type;
} exynos_compat[] = {
{
- .compatible = "samsung,exynos4210-dw-mshc",
- .ctrl_type = DW_MCI_TYPE_EXYNOS4210,
- }, {
- .compatible = "samsung,exynos4412-dw-mshc",
- .ctrl_type = DW_MCI_TYPE_EXYNOS4412,
- }, {
- .compatible = "samsung,exynos5250-dw-mshc",
- .ctrl_type = DW_MCI_TYPE_EXYNOS5250,
- }, {
- .compatible = "samsung,exynos5420-dw-mshc",
- .ctrl_type = DW_MCI_TYPE_EXYNOS5420,
- }, {
- .compatible = "samsung,exynos5420-dw-mshc-smu",
- .ctrl_type = DW_MCI_TYPE_EXYNOS5420_SMU,
- }, {
- .compatible = "samsung,exynos7-dw-mshc",
- .ctrl_type = DW_MCI_TYPE_EXYNOS7,
- }, {
- .compatible = "samsung,exynos7-dw-mshc-smu",
- .ctrl_type = DW_MCI_TYPE_EXYNOS7_SMU,
- },
-};
+.compatible = "samsung,exynos-dw-mshc", .ctrl_type = DW_MCI_TYPE_EXYNOS,},};
static inline u8 dw_mci_exynos_get_ciu_div(struct dw_mci *host)
{
- struct dw_mci_exynos_priv_data *priv = host->priv;
-
- if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4412)
- return EXYNOS4412_FIXED_CIU_CLK_DIV;
- else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4210)
- return EXYNOS4210_FIXED_CIU_CLK_DIV;
- else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
- return SDMMC_CLKSEL_GET_DIV(mci_readl(host, CLKSEL64)) + 1;
- else
- return SDMMC_CLKSEL_GET_DIV(mci_readl(host, CLKSEL)) + 1;
+ return SDMMC_CLKSEL_GET_DIV(mci_readl(host, CLKSEL)) + 1;
}
-static void dw_mci_exynos_config_smu(struct dw_mci *host)
+static int dw_mci_exynos_priv_init(struct dw_mci *host)
{
struct dw_mci_exynos_priv_data *priv = host->priv;
- /*
- * If Exynos is provided the Security management,
- * set for non-ecryption mode at this time.
- */
- if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420_SMU ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU) {
- mci_writel(host, MPSBEGIN0, 0);
- mci_writel(host, MPSEND0, SDMMC_ENDING_SEC_NR_MAX);
- mci_writel(host, MPSCTRL0, SDMMC_MPSCTRL_SECURE_WRITE_BIT |
- SDMMC_MPSCTRL_NON_SECURE_READ_BIT |
- SDMMC_MPSCTRL_VALID |
- SDMMC_MPSCTRL_NON_SECURE_WRITE_BIT);
+ priv->saved_strobe_ctrl = mci_readl(host, HS400_DLINE_CTRL);
+ priv->saved_dqs_en = mci_readl(host, HS400_DQS_EN);
+ priv->saved_dqs_en |= AXI_NON_BLOCKING_WR;
+ mci_writel(host, HS400_DQS_EN, priv->saved_dqs_en);
+ if (!priv->dqs_delay)
+ priv->dqs_delay = DQS_CTRL_GET_RD_DELAY(priv->saved_strobe_ctrl);
+#if defined(CONFIG_MMC_DW_64BIT_DESC)
+ if (priv->voltage_int_extra != 0) {
+ u32 reg = 0;
+
+ reg = mci_readl(host, AXI_BURST_LEN);
+ reg &= ~(0x7 << 24);
+ reg |= (priv->voltage_int_extra << 24);
+ mci_writel(host, AXI_BURST_LEN, reg);
}
+#endif
+ return 0;
}
-static int dw_mci_exynos_priv_init(struct dw_mci *host)
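+/*
+ * Spread-spectrum clocking (SSC) control for the card-clock PLL. The body is
+ * currently compiled out with #if 0; when enabled it programs the PLL
+ * modulation via cal_pll_mmc_set_ssc() using the DT-provided ssc_rate
+ * (mfr = 12 appears to be a fixed modulation frequency ratio here).
+ */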
+static void dw_mci_ssclk_control(struct dw_mci *host, int enable)
{
- struct dw_mci_exynos_priv_data *priv = host->priv;
-
- dw_mci_exynos_config_smu(host);
-
- if (priv->ctrl_type >= DW_MCI_TYPE_EXYNOS5420) {
- priv->saved_strobe_ctrl = mci_readl(host, HS400_DLINE_CTRL);
- priv->saved_dqs_en = mci_readl(host, HS400_DQS_EN);
- priv->saved_dqs_en |= AXI_NON_BLOCKING_WR;
- mci_writel(host, HS400_DQS_EN, priv->saved_dqs_en);
- if (!priv->dqs_delay)
- priv->dqs_delay =
- DQS_CTRL_GET_RD_DELAY(priv->saved_strobe_ctrl);
+#if 0
+ if (host->pdata->quirks & DW_MCI_QUIRK_USE_SSC) {
+ u32 err;
+
+ if (enable && cal_pll_mmc_check() == false) {
+ if (host->pdata->ssc_rate > 8) {
+ dev_info(host->dev, "unvalid SSC rate value.\n");
+ } else {
+ err = cal_pll_mmc_set_ssc(12, host->pdata->ssc_rate, 1);
+ if (err)
+ dev_info(host->dev, "SSC set fail.\n");
+ else
+ dev_info(host->dev, "SSC set enable.\n");
+ }
+ } else if (!enable && cal_pll_mmc_check() == true) {
+ err = cal_pll_mmc_set_ssc(0, 0, 0);
+ if (err)
+ dev_info(host->dev, "SSC set fail.\n");
+ else
+ dev_info(host->dev, "SSC set disable.\n");
+ }
}
-
- host->bus_hz /= (priv->ciu_div + 1);
-
- return 0;
+#endif
}
static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
{
- struct dw_mci_exynos_priv_data *priv = host->priv;
u32 clksel;
- if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
- clksel = mci_readl(host, CLKSEL64);
- else
- clksel = mci_readl(host, CLKSEL);
-
+ clksel = mci_readl(host, CLKSEL);
clksel = (clksel & ~SDMMC_CLKSEL_TIMING_MASK) | timing;
- if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
- mci_writel(host, CLKSEL64, clksel);
- else
- mci_writel(host, CLKSEL, clksel);
+	if (!((host->pdata->io_mode == MMC_TIMING_MMC_HS400) ||
+	      (host->pdata->io_mode == MMC_TIMING_MMC_HS400_ES)))
+		clksel &= ~(BIT(30) | BIT(19));
- /*
- * Exynos4412 and Exynos5250 extends the use of CMD register with the
- * use of bit 29 (which is reserved on standard MSHC controllers) for
- * optionally bypassing the HOLD register for command and data. The
- * HOLD register should be bypassed in case there is no phase shift
- * applied on CMD/DATA that is sent to the card.
- */
- if (!SDMMC_CLKSEL_GET_DRV_WD3(clksel) && host->slot)
- set_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags);
+ mci_writel(host, CLKSEL, clksel);
}
#ifdef CONFIG_PM
static int dw_mci_exynos_runtime_resume(struct device *dev)
{
- struct dw_mci *host = dev_get_drvdata(dev);
- int ret;
-
- ret = dw_mci_runtime_resume(dev);
- if (ret)
- return ret;
-
- dw_mci_exynos_config_smu(host);
-
- return ret;
+ return dw_mci_runtime_resume(dev);
}
/**
static int dw_mci_exynos_resume_noirq(struct device *dev)
{
struct dw_mci *host = dev_get_drvdata(dev);
- struct dw_mci_exynos_priv_data *priv = host->priv;
u32 clksel;
- if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
- clksel = mci_readl(host, CLKSEL64);
- else
- clksel = mci_readl(host, CLKSEL);
+ clksel = mci_readl(host, CLKSEL);
- if (clksel & SDMMC_CLKSEL_WAKEUP_INT) {
- if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
- mci_writel(host, CLKSEL64, clksel);
- else
- mci_writel(host, CLKSEL, clksel);
- }
+ if (clksel & SDMMC_CLKSEL_WAKEUP_INT)
+ mci_writel(host, CLKSEL, clksel);
return 0;
}
#else
#define dw_mci_exynos_resume_noirq NULL
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM */
+
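+/*
+ * Toggle HW auto clock gating (HWACG) for card interrupts via
+ * MMC_HWACG_CONTROL in FORCE_CLK_STOP; qactive_check records the last
+ * requested Q-channel state.
+ */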
+static void dw_mci_card_int_hwacg_ctrl(struct dw_mci *host, u32 flag)
+{
+ u32 reg;
+
+ reg = mci_readl(host, FORCE_CLK_STOP);
+ if (flag == HWACG_Q_ACTIVE_EN) {
+ reg |= MMC_HWACG_CONTROL;
+ host->qactive_check = HWACG_Q_ACTIVE_EN;
+ } else {
+ reg &= ~(MMC_HWACG_CONTROL);
+ host->qactive_check = HWACG_Q_ACTIVE_DIS;
+ }
+ mci_writel(host, FORCE_CLK_STOP, reg);
+}
static void dw_mci_exynos_config_hs400(struct dw_mci *host, u32 timing)
{
 * Configure the registers related to HS400
 */
- if (priv->ctrl_type < DW_MCI_TYPE_EXYNOS5420) {
- if (timing == MMC_TIMING_MMC_HS400)
- dev_warn(host->dev,
- "cannot configure HS400, unsupported chipset\n");
- return;
- }
dqs = priv->saved_dqs_en;
strobe = priv->saved_strobe_ctrl;
- if (timing == MMC_TIMING_MMC_HS400 ||
- timing == MMC_TIMING_MMC_HS400_ES) {
+ if (timing == MMC_TIMING_MMC_HS400 || timing == MMC_TIMING_MMC_HS400_ES) {
dqs &= ~(DWMCI_TXDT_CRC_TIMER_SET(0xFF, 0xFF));
- dqs |= (DWMCI_TXDT_CRC_TIMER_SET(priv->ddr200_tx_t_fastlimit,
- priv->ddr200_tx_t_initval) | DWMCI_RDDQS_EN |
- DWMCI_AXI_NON_BLOCKING_WRITE);
+ dqs |= (DWMCI_TXDT_CRC_TIMER_SET(priv->hs400_tx_t_fastlimit,
+ priv->hs400_tx_t_initval) | DWMCI_RDDQS_EN |
+ DWMCI_AXI_NON_BLOCKING_WRITE);
if (host->pdata->quirks & DW_MCI_QUIRK_ENABLE_ULP) {
if (priv->delay_line || priv->tx_delay_line)
strobe = DWMCI_WD_DQS_DELAY_CTRL(priv->tx_delay_line) |
- DWMCI_FIFO_CLK_DELAY_CTRL(0x2) |
- DWMCI_RD_DQS_DELAY_CTRL(priv->delay_line);
+ DWMCI_FIFO_CLK_DELAY_CTRL(0x2) |
+ DWMCI_RD_DQS_DELAY_CTRL(priv->delay_line);
else
strobe = DWMCI_FIFO_CLK_DELAY_CTRL(0x2) |
- DWMCI_RD_DQS_DELAY_CTRL(90);
+ DWMCI_RD_DQS_DELAY_CTRL(90);
} else {
if (priv->delay_line)
strobe = DWMCI_FIFO_CLK_DELAY_CTRL(0x2) |
- DWMCI_RD_DQS_DELAY_CTRL(priv->delay_line);
+ DWMCI_RD_DQS_DELAY_CTRL(priv->delay_line);
else
strobe = DWMCI_FIFO_CLK_DELAY_CTRL(0x2) |
- DWMCI_RD_DQS_DELAY_CTRL(90);
+ DWMCI_RD_DQS_DELAY_CTRL(90);
}
dqs |= (DATA_STROBE_EN | DWMCI_AXI_NON_BLOCKING_WRITE);
if (timing == MMC_TIMING_MMC_HS400_ES)
unsigned long actual;
u8 div;
int ret;
+ u32 clock;
+
/*
* Don't care if wanted clock is zero or
* ciu clock is unavailable
if (wanted < EXYNOS_CCLKIN_MIN)
wanted = EXYNOS_CCLKIN_MIN;
- if (wanted == priv->cur_speed)
- return;
-
div = dw_mci_exynos_get_ciu_div(host);
+
+ if (wanted == priv->cur_speed) {
+ clock = clk_get_rate(host->ciu_clk);
+ if (clock == priv->cur_speed * div)
+ return;
+ }
+
ret = clk_set_rate(host->ciu_clk, wanted * div);
if (ret)
- dev_warn(host->dev,
- "failed to set clk-rate %u error: %d\n",
- wanted * div, ret);
+ dev_warn(host->dev, "failed to set clk-rate %u error: %d\n", wanted * div, ret);
actual = clk_get_rate(host->ciu_clk);
host->bus_hz = actual / div;
priv->cur_speed = wanted;
{
struct dw_mci_exynos_priv_data *priv = host->priv;
unsigned int wanted = ios->clock;
+ u32 *clk_tbl = priv->ref_clk;
u32 timing = ios->timing, clksel;
+ u32 cclkin;
+
+ cclkin = clk_tbl[timing];
+ host->pdata->io_mode = timing;
+ if (host->bus_hz != cclkin)
+ wanted = cclkin;
switch (timing) {
case MMC_TIMING_MMC_HS400:
case MMC_TIMING_MMC_HS400_ES:
- /* Update tuned sample timing */
- clksel = SDMMC_CLKSEL_UP_SAMPLE(
- priv->hs400_timing, priv->tuned_sample);
- wanted <<= 1;
+ if (host->pdata->quirks & DW_MCI_QUIRK_ENABLE_ULP) {
+ clksel = SDMMC_CLKSEL_UP_SAMPLE(priv->hs400_ulp_timing, priv->tuned_sample);
+ clksel |= (BIT(30) | BIT(19)); /* ultra low powermode on */
+ } else {
+ clksel = SDMMC_CLKSEL_UP_SAMPLE(priv->hs400_timing, priv->tuned_sample);
+			clksel &= ~(BIT(30) | BIT(19)); /* ultra low power mode off */
+ wanted <<= 1;
+ }
+ if (host->pdata->is_fine_tuned)
+ clksel |= BIT(6);
break;
case MMC_TIMING_MMC_DDR52:
+ case MMC_TIMING_UHS_DDR50:
clksel = priv->ddr_timing;
/* Should be double rate for DDR mode */
if (ios->bus_width == MMC_BUS_WIDTH_8)
wanted <<= 1;
break;
+ case MMC_TIMING_MMC_HS200:
+ clksel = SDMMC_CLKSEL_UP_SAMPLE(priv->hs200_timing, priv->tuned_sample);
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ if (priv->sdr104_timing)
+ clksel = SDMMC_CLKSEL_UP_SAMPLE(priv->sdr104_timing, priv->tuned_sample);
+ else {
+ dev_info(host->dev, "Setting of SDR104 timing in not been!!\n");
+ clksel = SDMMC_CLKSEL_UP_SAMPLE(priv->sdr_timing, priv->tuned_sample);
+ }
+ dw_mci_ssclk_control(host, 1);
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ if (priv->sdr50_timing)
+ clksel = SDMMC_CLKSEL_UP_SAMPLE(priv->sdr50_timing, priv->tuned_sample);
+ else {
+ dev_info(host->dev, "Setting of SDR50 timing is not been!!\n");
+ clksel = SDMMC_CLKSEL_UP_SAMPLE(priv->sdr_timing, priv->tuned_sample);
+ }
+ dw_mci_ssclk_control(host, 1);
+ break;
default:
clksel = priv->sdr_timing;
}
- /* Set clock timing for the requested speed mode*/
+ host->cclk_in = wanted;
+
+ /* Set clock timing for the requested speed mode */
dw_mci_exynos_set_clksel_timing(host, clksel);
/* Configure setting for HS400 */
dw_mci_exynos_adjust_clock(host, wanted);
}
+#ifndef MHZ
+#define MHZ (1000 * 1000)
+#endif
+
static int dw_mci_exynos_parse_dt(struct dw_mci *host)
{
struct dw_mci_exynos_priv_data *priv;
struct device_node *np = host->dev->of_node;
u32 timing[4];
- u32 div = 0;
+ u32 div = 0, voltage_int_extra = 0;
int idx;
- int ret;
+ u32 ref_clk_size;
+ u32 *ref_clk;
+ u32 *ciu_clkin_values = NULL;
+ int idx_ref;
+ int ret = 0;
+ int id = 0, i;
priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
+ if (!priv) {
+ dev_err(host->dev, "mem alloc failed for private data\n");
return -ENOMEM;
+ }
for (idx = 0; idx < ARRAY_SIZE(exynos_compat); idx++) {
if (of_device_is_compatible(np, exynos_compat[idx].compatible))
priv->ctrl_type = exynos_compat[idx].ctrl_type;
}
- if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4412)
- priv->ciu_div = EXYNOS4412_FIXED_CIU_CLK_DIV - 1;
- else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4210)
- priv->ciu_div = EXYNOS4210_FIXED_CIU_CLK_DIV - 1;
- else {
- of_property_read_u32(np, "samsung,dw-mshc-ciu-div", &div);
- priv->ciu_div = div;
+ if (of_property_read_u32(np, "num-ref-clks", &ref_clk_size)) {
+ dev_err(host->dev, "Getting a number of referece clock failed\n");
+ ret = -ENODEV;
+ goto err_ref_clk;
+ }
+
+ ref_clk = devm_kzalloc(host->dev, ref_clk_size * sizeof(*ref_clk), GFP_KERNEL);
+ if (!ref_clk) {
+ dev_err(host->dev, "Mem alloc failed for reference clock table\n");
+ ret = -ENOMEM;
+ goto err_ref_clk;
+ }
+
+ ciu_clkin_values = devm_kzalloc(host->dev,
+ ref_clk_size * sizeof(*ciu_clkin_values), GFP_KERNEL);
+
+ if (!ciu_clkin_values) {
+ dev_err(host->dev, "Mem alloc failed for temporary clock values\n");
+ ret = -ENOMEM;
+ goto err_ref_clk;
+ }
+ if (of_property_read_u32_array(np, "ciu_clkin", ciu_clkin_values, ref_clk_size)) {
+ dev_err(host->dev, "Getting ciu_clkin values faild\n");
+ ret = -ENOMEM;
+ goto err_ref_clk;
+ }
+
+ for (idx_ref = 0; idx_ref < ref_clk_size; idx_ref++, ref_clk++, ciu_clkin_values++) {
+ if (*ciu_clkin_values > MHZ)
+ *(ref_clk) = (*ciu_clkin_values);
+ else
+ *(ref_clk) = (*ciu_clkin_values) * MHZ;
}
- ret = of_property_read_u32_array(np,
- "samsung,dw-mshc-sdr-timing", timing, 4);
+ ref_clk -= ref_clk_size;
+ ciu_clkin_values -= ref_clk_size;
+ priv->ref_clk = ref_clk;
+
+ if (of_get_property(np, "card-detect", NULL))
+ priv->cd_gpio = of_get_named_gpio(np, "card-detect", 0);
+ else
+ priv->cd_gpio = -1;
+
+ /* Swapping clock drive strength */
+ of_property_read_u32(np, "clk-drive-number", &priv->clk_drive_number);
+
+ priv->pinctrl = devm_pinctrl_get(host->dev);
+
+ if (IS_ERR(priv->pinctrl)) {
+ priv->pinctrl = NULL;
+ } else {
+ priv->clk_drive_base = pinctrl_lookup_state(priv->pinctrl, "default");
+ priv->clk_drive_str[0] = pinctrl_lookup_state(priv->pinctrl, "fast-slew-rate-1x");
+ priv->clk_drive_str[1] = pinctrl_lookup_state(priv->pinctrl, "fast-slew-rate-2x");
+ priv->clk_drive_str[2] = pinctrl_lookup_state(priv->pinctrl, "fast-slew-rate-3x");
+ priv->clk_drive_str[3] = pinctrl_lookup_state(priv->pinctrl, "fast-slew-rate-4x");
+ priv->clk_drive_str[4] = pinctrl_lookup_state(priv->pinctrl, "fast-slew-rate-5x");
+ priv->clk_drive_str[5] = pinctrl_lookup_state(priv->pinctrl, "fast-slew-rate-6x");
+
+ for (i = 0; i < 6; i++) {
+ if (IS_ERR(priv->clk_drive_str[i]))
+ priv->clk_drive_str[i] = NULL;
+ }
+ }
+
+ of_property_read_u32(np, "samsung,dw-mshc-ciu-div", &div);
+ priv->ciu_div = div;
+
+ if (of_property_read_u32(np, "samsung,voltage-int-extra", &voltage_int_extra))
+ priv->voltage_int_extra = voltage_int_extra;
+
+ ret = of_property_read_u32_array(np, "samsung,dw-mshc-sdr-timing", timing, 4);
if (ret)
return ret;
priv->sdr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], timing[2], timing[3]);
- ret = of_property_read_u32_array(np,
- "samsung,dw-mshc-ddr-timing", timing, 4);
+ ret = of_property_read_u32_array(np, "samsung,dw-mshc-ddr-timing", timing, 4);
if (ret)
return ret;
priv->ddr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], timing[2], timing[3]);
- ret = of_property_read_u32_array(np,
- "samsung,dw-mshc-hs400-timing", timing, 4);
- if (!ret && of_property_read_u32(np,
- "samsung,read-strobe-delay", &priv->dqs_delay))
- dev_dbg(host->dev,
- "read-strobe-delay is not found, assuming usage of default value\n");
+ of_property_read_u32(np, "ignore-phase", &priv->ignore_phase);
+ if (of_find_property(np, "bypass-for-allpass", NULL))
+ priv->ctrl_flag |= DW_MMC_EXYNOS_BYPASS_FOR_ALL_PASS;
+ if (of_find_property(np, "use-enable-shift", NULL))
+ priv->ctrl_flag |= DW_MMC_EXYNOS_ENABLE_SHIFT;
+
+ id = of_alias_get_id(host->dev->of_node, "mshc");
+ switch (id) {
+ /* dwmmc0 : eMMC */
+ case 0:
+ ret = of_property_read_u32_array(np, "samsung,dw-mshc-hs200-timing", timing, 4);
+ if (ret)
+ goto err_ref_clk;
+ priv->hs200_timing =
+ SDMMC_CLKSEL_TIMING(timing[0], timing[1], timing[2], timing[3]);
+
+ ret = of_property_read_u32_array(np, "samsung,dw-mshc-hs400-timing", timing, 4);
+ if (ret)
+ goto err_ref_clk;
+
+ priv->hs400_timing =
+ SDMMC_CLKSEL_TIMING(timing[0], timing[1], timing[2], timing[3]);
+
+ ret = of_property_read_u32_array(np, "samsung,dw-mshc-hs400-ulp-timing", timing, 4);
+ if (!ret)
+ priv->hs400_ulp_timing =
+ SDMMC_CLKSEL_TIMING(timing[0], timing[1], timing[2], timing[3]);
+ else
+ ret = 0;
+
+ /* Rx Delay Line */
+ of_property_read_u32(np, "samsung,dw-mshc-hs400-delay-line", &priv->delay_line);
- priv->hs400_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], timing[2], timing[3]);
+ /* Tx Delay Line */
+ of_property_read_u32(np,
+ "samsung,dw-mshc-hs400-tx-delay-line", &priv->tx_delay_line);
+ /* The fast RXCRC packet arrival time */
+ of_property_read_u32(np,
+ "samsung,dw-mshc-txdt-crc-timer-fastlimit",
+ &priv->hs400_tx_t_fastlimit);
+
+ /* Initial value of the timeout down counter for RXCRC packet */
+ of_property_read_u32(np,
+ "samsung,dw-mshc-txdt-crc-timer-initval",
+ &priv->hs400_tx_t_initval);
+ break;
+ /* dwmmc1 : SDIO */
+ case 1:
+ /* dwmmc2 : SD Card */
+ case 2:
+ ret = of_property_read_u32_array(np, "samsung,dw-mshc-sdr50-timing", timing, 4); /* SDR50 100Mhz */
+ if (!ret)
+ priv->sdr50_timing =
+ SDMMC_CLKSEL_TIMING(timing[0], timing[1], timing[2], timing[3]);
+ else {
+ priv->sdr50_timing = priv->sdr_timing;
+ ret = 0;
+ }
+
+ ret = of_property_read_u32_array(np, "samsung,dw-mshc-sdr104-timing", timing, 4); /* SDR104 200mhz */
+ if (!ret)
+ priv->sdr104_timing =
+ SDMMC_CLKSEL_TIMING(timing[0], timing[1], timing[2], timing[3]);
+ else {
+ priv->sdr104_timing = priv->sdr_timing;
+ ret = 0;
+ }
+ break;
+ default:
+ ret = -ENODEV;
+ }
host->priv = priv;
- return 0;
+ err_ref_clk:
+ return ret;
}
static inline u8 dw_mci_exynos_get_clksmpl(struct dw_mci *host)
{
- struct dw_mci_exynos_priv_data *priv = host->priv;
-
- if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
- return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL64));
- else
- return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL));
+ return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL));
}
static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample)
{
u32 clksel;
- struct dw_mci_exynos_priv_data *priv = host->priv;
- if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
- clksel = mci_readl(host, CLKSEL64);
- else
- clksel = mci_readl(host, CLKSEL);
+ clksel = mci_readl(host, CLKSEL);
clksel = SDMMC_CLKSEL_UP_SAMPLE(clksel, sample);
- if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
- mci_writel(host, CLKSEL64, clksel);
- else
- mci_writel(host, CLKSEL, clksel);
+ mci_writel(host, CLKSEL, clksel);
}
static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host)
{
- struct dw_mci_exynos_priv_data *priv = host->priv;
u32 clksel;
u8 sample;
- if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
- clksel = mci_readl(host, CLKSEL64);
+ clksel = mci_readl(host, CLKSEL);
+ sample = (clksel + 1) & 0x7;
+ clksel = (clksel & ~0x7) | sample;
+ mci_writel(host, CLKSEL, clksel);
+ return sample;
+}
+
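+/*
+ * CLKSEL[26:24] encodes divratio - 1, giving ((divratio + 1) * 2) sample
+ * phases of (360 / phases) degrees each; e.g. a field value of 3 yields 8
+ * phases of 45 degrees. On pre-2.60a IP, a selected sample at 225 degrees
+ * or beyond appears to break end-bit detection, hence the quirk.
+ */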
+static void dw_mci_set_quirk_endbit(struct dw_mci *host, s8 mid)
+{
+ u32 clksel, phase;
+ u32 shift;
+
+ clksel = mci_readl(host, CLKSEL);
+ phase = (((clksel >> 24) & 0x7) + 1) << 1;
+ shift = 360 / phase;
+
+ if (host->verid < DW_MMC_260A && (shift * mid) % 360 >= 225)
+ host->quirks |= DW_MCI_QUIRK_NO_DETECT_EBIT;
else
- clksel = mci_readl(host, CLKSEL);
+ host->quirks &= ~DW_MCI_QUIRK_NO_DETECT_EBIT;
+}
+
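+/*
+ * Each row of en_shift_phase[] maps an HS400_ENABLE_SHIFT value (column 0)
+ * to the clock samples (columns 1-3) that require it; fine tuning may bump
+ * the shift by one step.
+ */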
+static void dw_mci_exynos_set_enable_shift(struct dw_mci *host, u32 sample, bool fine_tune)
+{
+ u32 i, j, en_shift, en_shift_phase[3][4] = { {0, 0, 1, 0},
+ {1, 2, 3, 3},
+ {2, 4, 5, 5}
+ };
+
+ en_shift = mci_readl(host, HS400_ENABLE_SHIFT)
+ & ~(DWMCI_ENABLE_SHIFT_MASK);
+
+ for (i = 0; i < 3; i++) {
+ for (j = 1; j < 4; j++) {
+ if (sample == en_shift_phase[i][j]) {
+ en_shift |= DWMCI_ENABLE_SHIFT(en_shift_phase[i][0]);
+ break;
+ }
+ }
+ }
+ if ((en_shift < 2) && fine_tune)
+ en_shift += 1;
+ mci_writel(host, HS400_ENABLE_SHIFT, en_shift);
+}
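+/*
+ * Advance to the next candidate clock sample, skipping phases listed in the
+ * DT "ignore-phase" mask, and switch the sample path for phases 6/7
+ * (sample_path_sel_en/_dis are presumably provided by dw_mmc-exynos.h).
+ */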
+static u8 dw_mci_tuning_sampling(struct dw_mci *host)
+{
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+ u32 clksel, i;
+ u8 sample;
+
+ clksel = mci_readl(host, CLKSEL);
sample = (clksel + 1) & 0x7;
clksel = SDMMC_CLKSEL_UP_SAMPLE(clksel, sample);
- if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
- priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU)
- mci_writel(host, CLKSEL64, clksel);
+ if (priv->ignore_phase) {
+ for (i = 0; i < 8; i++) {
+ if (priv->ignore_phase & (0x1 << sample))
+ sample = (sample + 1) & 0x7;
+ else
+ break;
+ }
+ }
+ clksel = (clksel & 0xfffffff8) | sample;
+ mci_writel(host, CLKSEL, clksel);
+
+ if (phase6_en & (0x1 << sample) || phase7_en & (0x1 << sample))
+ sample_path_sel_en(host, AXI_BURST_LEN);
else
- mci_writel(host, CLKSEL, clksel);
+ sample_path_sel_dis(host, AXI_BURST_LEN);
+
+ if (priv->ctrl_flag & DW_MMC_EXYNOS_ENABLE_SHIFT)
+ dw_mci_exynos_set_enable_shift(host, sample, false);
return sample;
}
-static s8 dw_mci_exynos_get_best_clksmpl(u8 candiates)
+/* initialize the clock sample to given value */
+static void dw_mci_exynos_set_sample(struct dw_mci *host, u32 sample, bool tuning)
{
- const u8 iter = 8;
- u8 __c;
- s8 i, loc = -1;
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+ u32 clksel;
- for (i = 0; i < iter; i++) {
- __c = ror8(candiates, i);
- if ((__c & 0xc7) == 0xc7) {
- loc = i;
- goto out;
- }
+ clksel = mci_readl(host, CLKSEL);
+ clksel = (clksel & ~0x7) | SDMMC_CLKSEL_CCLK_SAMPLE(sample);
+ mci_writel(host, CLKSEL, clksel);
+ if (sample == 6 || sample == 7)
+ sample_path_sel_en(host, AXI_BURST_LEN);
+ else
+ sample_path_sel_dis(host, AXI_BURST_LEN);
+
+ if (priv->ctrl_flag & DW_MMC_EXYNOS_ENABLE_SHIFT)
+ dw_mci_exynos_set_enable_shift(host, sample, false);
+ if (!tuning)
+ dw_mci_set_quirk_endbit(host, clksel);
+}
+
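+/*
+ * CLKSEL bit 6 selects "fine tuning", a half step between two adjacent
+ * coarse sample phases; pdata->is_fine_tuned tracks the current state.
+ */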
+static void dw_mci_set_fine_tuning_bit(struct dw_mci *host, bool is_fine_tuning)
+{
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+ u32 clksel, sample;
+
+ clksel = mci_readl(host, CLKSEL);
+ clksel = (clksel & ~BIT(6));
+ sample = (clksel & 0x7);
+
+ if (is_fine_tuning) {
+ host->pdata->is_fine_tuned = true;
+ clksel |= BIT(6);
+ } else
+ host->pdata->is_fine_tuned = false;
+ mci_writel(host, CLKSEL, clksel);
+ if (priv->ctrl_flag & DW_MMC_EXYNOS_ENABLE_SHIFT) {
+ if (((sample % 2) == 1) && is_fine_tuning && sample != 0x7)
+ dw_mci_exynos_set_enable_shift(host, sample, true);
+ else
+ dw_mci_exynos_set_enable_shift(host, sample, false);
}
+}
- for (i = 0; i < iter; i++) {
- __c = ror8(candiates, i);
- if ((__c & 0x83) == 0x83) {
- loc = i;
- goto out;
- }
+/* read current clock sample offset */
+static u32 dw_mci_exynos_get_sample(struct dw_mci *host)
+{
+ u32 clksel = mci_readl(host, CLKSEL);
+
+ return SDMMC_CLKSEL_CCLK_SAMPLE(clksel);
+}
+
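+/*
+ * Slide a window of `mask` width across the doubled pass map, starting at
+ * `startbit`; return the first offset where every bit in the window passes,
+ * encoded via SDMMC_CLKSEL_CCLK_FINE_SAMPLE(), or -1 if no window matches.
+ */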
+static int __find_median_of_16bits(u32 orig_bits, u16 mask, u8 startbit)
+{
+ u32 i, testbits;
+
+ testbits = orig_bits;
+ for (i = startbit; i < (16 + startbit); i++, testbits >>= 1)
+ if ((testbits & mask) == mask)
+ return SDMMC_CLKSEL_CCLK_FINE_SAMPLE(i);
+ return -1;
+}
+
+#define NUM_OF_MASK 7
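+/*
+ * Try the widest run of passing samples first (13 bits wide), then
+ * progressively narrower ones; optimum[] biases the returned sample toward
+ * the centre of each run.
+ */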
+static int find_median_of_16bits(struct dw_mci *host, unsigned int map, bool force)
+{
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+ u32 orig_bits;
+ u8 i, divratio;
+ int sel = -1;
+ u16 mask[NUM_OF_MASK] = { 0x1fff, 0x7ff, 0x1ff, 0x7f, 0x1f, 0xf, 0x7 };
+ /* Tuning during the center value is set to 3/2 */
+ int optimum[NUM_OF_MASK] = { 9, 7, 6, 5, 3, 2, 1 };
+
+ /* replicate the map so "arithimetic shift right" shifts in
+ * the same bits "again". e.g. portable "Rotate Right" bit operation.
+ */
+ if (map == 0xFFFF && force == false)
+ return sel;
+
+ divratio = (mci_readl(host, CLKSEL) >> 24) & 0x7;
+ dev_info(host->dev, "divratio: %d map: 0x %08x\n", divratio, map);
+
+ orig_bits = map | (map << 16);
+
+ if (divratio == 1) {
+ if (!(priv->ctrl_flag & DW_MMC_EXYNOS_ENABLE_SHIFT))
+ orig_bits = orig_bits & (orig_bits >> 8);
+ }
+
+ for (i = 0; i < NUM_OF_MASK; i++) {
+ sel = __find_median_of_16bits(orig_bits, mask[i], optimum[i]);
+ if (-1 != sel)
+ break;
}
-out:
- return loc;
+ return sel;
}
-static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
+static void exynos_dwmci_tuning_drv_st(struct dw_mci *host)
+{
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+
+ dev_info(host->dev, "Clock GPIO Drive Strength Value: x%d\n", (priv->clk_drive_tuning));
+
+ if (priv->pinctrl && priv->clk_drive_str[priv->clk_drive_tuning - 1])
+ pinctrl_select_state(priv->pinctrl,
+ priv->clk_drive_str[priv->clk_drive_tuning - 1]);
+}
+
+/*
+ * Test all 8 possible "Clock in" Sample timings.
+ * Create a bitmap of which CLock sample values work and find the "median"
+ * value. Apply it and remember that we found the best value.
+ */
+static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode,
+ struct dw_mci_tuning_data *tuning_data)
{
struct dw_mci *host = slot->host;
struct dw_mci_exynos_priv_data *priv = host->priv;
struct mmc_host *mmc = slot->mmc;
- u8 start_smpl, smpl, candiates = 0;
- s8 found = -1;
+ unsigned int tuning_loop = MAX_TUNING_LOOP;
+ unsigned int drv_str_retries;
+	bool tuned = false;
int ret = 0;
+ u8 *tuning_blk; /* data read from device */
+
+ unsigned int sample_good = 0; /* bit map of clock sample (0-7) */
+ u32 test_sample = -1;
+ u32 orig_sample;
+ int best_sample = 0, best_sample_ori = 0;
+ u8 pass_index;
+ bool is_fine_tuning = false;
+ unsigned int abnormal_result = 0xFFFF;
+ unsigned int temp_ignore_phase = priv->ignore_phase;
+ int ffs_ignore_phase = 0;
+ u8 all_pass_count = 0;
+ bool bypass = false;
+
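+	/*
+	 * Build the "all pass" reference map: every coarse+fine pass bit set
+	 * (0xFFFF) except the two bits belonging to each ignored phase.
+	 */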
+ while (temp_ignore_phase) {
+ ffs_ignore_phase = ffs(temp_ignore_phase) - 1;
+ abnormal_result &= ~(0x3 << (2 * ffs_ignore_phase));
+ temp_ignore_phase &= ~(0x1 << ffs_ignore_phase);
+ }
- start_smpl = dw_mci_exynos_get_clksmpl(host);
+ /* Short circuit: don't tune again if we already did. */
+ if (host->pdata->tuned) {
+ host->drv_data->misc_control(host, CTRL_RESTORE_CLKSEL, NULL);
+ mci_writel(host, CDTHRCTL, host->cd_rd_thr << 16 | 1);
+ dev_info(host->dev, "EN_SHIFT 0x %08x CLKSEL 0x %08x\n",
+ mci_readl(host, HS400_ENABLE_SHIFT), mci_readl(host, CLKSEL));
+ return 0;
+ }
+
+ tuning_blk = kmalloc(2 * tuning_data->blksz, GFP_KERNEL);
+ if (!tuning_blk)
+ return -ENOMEM;
+
+ test_sample = orig_sample = dw_mci_exynos_get_sample(host);
+ host->cd_rd_thr = 512;
+ mci_writel(host, CDTHRCTL, host->cd_rd_thr << 16 | 1);
+
+ /*
+ * eMMC 4.5 spec section 6.6.7.1 says the device is guaranteed to
+	 * complete 40 iterations of CMD21 in 150 ms, so this shouldn't take
+	 * longer than about 30 ms or so, at least assuming most values
+ * work and don't time out.
+ */
+
+ if (host->pdata->io_mode == MMC_TIMING_MMC_HS400)
+ host->quirks |= DW_MCI_QUIRK_NO_DETECT_EBIT;
+
+ dev_info(host->dev, "Tuning Abnormal_result 0x%08x.\n", abnormal_result);
+
+ priv->clk_drive_tuning = priv->clk_drive_number;
+ drv_str_retries = priv->clk_drive_number;
do {
- mci_writel(host, TMOUT, ~0);
- smpl = dw_mci_exynos_move_next_clksmpl(host);
+ struct mmc_request mrq;
+ struct mmc_command cmd;
+ struct mmc_command stop;
+ struct mmc_data data;
+ struct scatterlist sg;
+
+ if (!tuning_loop)
+ break;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = opcode;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+ cmd.error = 0;
+ cmd.busy_timeout = 10; /* 2x * (150ms/40 + setup overhead) */
+
+ memset(&stop, 0, sizeof(stop));
+ stop.opcode = MMC_STOP_TRANSMISSION;
+ stop.arg = 0;
+ stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ stop.error = 0;
+
+ memset(&data, 0, sizeof(data));
+ data.blksz = tuning_data->blksz;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.sg = &sg;
+ data.sg_len = 1;
+ data.error = 0;
+
+ memset(tuning_blk, ~0U, tuning_data->blksz);
+ sg_init_one(&sg, tuning_blk, tuning_data->blksz);
+
+ memset(&mrq, 0, sizeof(mrq));
+ mrq.cmd = &cmd;
+ mrq.stop = &stop;
+ mrq.data = &data;
+ host->mrq = &mrq;
+
+ /*
+ * DDR200 tuning Sequence with fine tuning setup
+ *
+ * 0. phase 0 (0 degree) + no fine tuning setup
+ * - pass_index = 0
+ * 1. phase 0 + fine tuning setup
+ * - pass_index = 1
+ * 2. phase 1 (90 degree) + no fine tuning setup
+ * - pass_index = 2
+ * ..
+ * 15. phase 7 + fine tuning setup
+ * - pass_index = 15
+ *
+ */
+ dw_mci_set_fine_tuning_bit(host, is_fine_tuning);
+
+ dw_mci_set_timeout(host, dw_mci_calc_timeout(host));
+ mmc_wait_for_req(mmc, &mrq);
+
+ pass_index = (u8) test_sample * 2;
+
+ if (is_fine_tuning)
+ pass_index++;
+
+ if (!cmd.error && !data.error) {
+ /*
+ * Verify the "tuning block" arrived (to host) intact.
+ * If yes, remember this sample value works.
+ */
+ if (host->use_dma == 1) {
+ sample_good |= (1 << pass_index);
+ } else {
+				if (!memcmp(tuning_data->blk_pattern, tuning_blk,
+					    tuning_data->blksz))
+					sample_good |= (1 << pass_index);
+ }
+ } else {
+ dev_info(&mmc->class_dev,
+ "Tuning error: cmd.error:%d, data.error:%d CLKSEL = 0x%08x, EN_SHIFT = 0x%08x\n",
+ cmd.error, data.error,
+ mci_readl(host, CLKSEL), mci_readl(host, HS400_ENABLE_SHIFT));
+ }
- if (!mmc_send_tuning(mmc, opcode, NULL))
- candiates |= (1 << smpl);
+ if (is_fine_tuning)
+ test_sample = dw_mci_tuning_sampling(host);
+
+ is_fine_tuning = !is_fine_tuning;
+
+ if (orig_sample == test_sample && !is_fine_tuning) {
+
+ /*
+ * Get at middle clock sample values.
+ */
+ if (sample_good == abnormal_result)
+ all_pass_count++;
+
+ if (priv->ctrl_flag & DW_MMC_EXYNOS_BYPASS_FOR_ALL_PASS)
+ bypass = (all_pass_count > priv->clk_drive_number) ? true : false;
+
+ if (bypass) {
+ dev_info(host->dev, "Bypassed for all pass at %d times\n",
+ priv->clk_drive_number);
+ sample_good = abnormal_result & 0xFFFF;
+ tuned = true;
+ }
+
+ best_sample = find_median_of_16bits(host, sample_good, bypass);
+
+ if (best_sample >= 0) {
+ dev_info(host->dev, "sample_good: 0x%02x best_sample: 0x%02x\n",
+ sample_good, best_sample);
+
+ if (sample_good != abnormal_result || bypass) {
+ tuned = true;
+ break;
+ }
+ } else
+ dev_info(host->dev,
+ "Failed to find median value in sample good (0x%02x)\n",
+ sample_good);
+
+ if (drv_str_retries) {
+ drv_str_retries--;
+ if (priv->clk_drive_str[0]) {
+ exynos_dwmci_tuning_drv_st(host);
+ if (priv->clk_drive_tuning > 0)
+ priv->clk_drive_tuning--;
+ }
+ sample_good = 0;
+ } else
+ break;
+ }
+ tuning_loop--;
+ } while (!tuned);
- } while (start_smpl != smpl);
+ /*
+	 * The median is an index into the extended pass map (8 -> 16 bits),
+	 * so it must be divided by 2 to get the clock sample value. An odd
+	 * median means the selected case uses fine tuning.
+ */
+
+ best_sample_ori = best_sample;
+ best_sample /= 2;
+
+ if (host->pdata->io_mode == MMC_TIMING_MMC_HS400)
+ host->quirks &= ~DW_MCI_QUIRK_NO_DETECT_EBIT;
- found = dw_mci_exynos_get_best_clksmpl(candiates);
- if (found >= 0) {
- dw_mci_exynos_set_clksmpl(host, found);
- priv->tuned_sample = found;
+ if (tuned) {
+ host->pdata->clk_smpl = priv->tuned_sample = best_sample;
+ if (host->pdata->only_once_tune)
+ host->pdata->tuned = true;
+
+ if (best_sample_ori % 2)
+ best_sample += 1;
+
+ dw_mci_exynos_set_sample(host, best_sample, false);
+ dw_mci_set_fine_tuning_bit(host, false);
} else {
+ /* Failed. Just restore and return error */
+ dev_err(host->dev, "tuning err\n");
+ mci_writel(host, CDTHRCTL, 0 << 16 | 0);
+ dw_mci_exynos_set_sample(host, orig_sample, false);
ret = -EIO;
}
+ /* Rollback Clock drive strength */
+ if (priv->pinctrl && priv->clk_drive_base)
+ pinctrl_select_state(priv->pinctrl, priv->clk_drive_base);
+
+ dev_info(host->dev, "CLKSEL = 0x%08x, EN_SHIFT = 0x%08x\n",
+ mci_readl(host, CLKSEL), mci_readl(host, HS400_ENABLE_SHIFT));
+
+ kfree(tuning_blk);
return ret;
}
-static int dw_mci_exynos_prepare_hs400_tuning(struct dw_mci *host,
- struct mmc_ios *ios)
+static int dw_mci_exynos_request_ext_irq(struct dw_mci *host, irq_handler_t func)
{
struct dw_mci_exynos_priv_data *priv = host->priv;
-
- dw_mci_exynos_set_clksel_timing(host, priv->hs400_timing);
- dw_mci_exynos_adjust_clock(host, (ios->clock) << 1);
+ int ext_cd_irq = 0;
+
+ if (gpio_is_valid(priv->cd_gpio) && !gpio_request(priv->cd_gpio, "DWMCI_EXT_CD")) {
+ ext_cd_irq = gpio_to_irq(priv->cd_gpio);
+ if (ext_cd_irq &&
+ devm_request_irq(host->dev, ext_cd_irq, func,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT, "tflash_det", host) == 0) {
+ dev_info(host->dev, "success to request irq for card detect.\n");
+ enable_irq_wake(ext_cd_irq);
+ } else
+ dev_info(host->dev, "cannot request irq for card detect.\n");
+ }
return 0;
}
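+/*
+ * Poll card presence via the DT "card-detect" GPIO; the raw level is
+ * inverted unless use_gpio_invert is set. Returns -1 when no valid GPIO is
+ * configured, presumably so the caller falls back to the controller's own
+ * card-detect logic.
+ */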
+static int dw_mci_exynos_check_cd(struct dw_mci *host)
+{
+ int ret = -1;
+ struct dw_mci_exynos_priv_data *priv = host->priv;
+
+ if (gpio_is_valid(priv->cd_gpio)) {
+ if (host->pdata->use_gpio_invert)
+ ret = gpio_get_value(priv->cd_gpio) ? 1 : 0;
+ else
+ ret = gpio_get_value(priv->cd_gpio) ? 0 : 1;
+ }
+ return ret;
+}
+
/* Common capabilities of Exynos4/Exynos5 SoC */
static unsigned long exynos_dwmmc_caps[4] = {
MMC_CAP_1_8V_DDR | MMC_CAP_8_BIT_DATA | MMC_CAP_CMD23,
MMC_CAP_CMD23,
};
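+/*
+ * Dispatcher for platform-specific callbacks from the dw_mmc core: restore
+ * the tuned CLKSEL sample, register the external card-detect IRQ, or poll
+ * card presence.
+ */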
+static int dw_mci_exynos_misc_control(struct dw_mci *host,
+ enum dw_mci_misc_control control, void *priv)
+{
+ int ret = 0;
+
+ switch (control) {
+ case CTRL_RESTORE_CLKSEL:
+ dw_mci_exynos_set_sample(host, host->pdata->clk_smpl, false);
+ dw_mci_set_fine_tuning_bit(host, host->pdata->is_fine_tuned);
+ break;
+ case CTRL_REQUEST_EXT_IRQ:
+ ret = dw_mci_exynos_request_ext_irq(host, (irq_handler_t) priv);
+ break;
+ case CTRL_CHECK_CD:
+ ret = dw_mci_exynos_check_cd(host);
+ break;
+ default:
+ dev_err(host->dev, "dw_mmc exynos: wrong case\n");
+ ret = -ENODEV;
+ }
+ return ret;
+}
+
+#ifdef CONFIG_MMC_DW_EXYNOS_FMP
+static int dw_mci_exynos_crypto_engine_cfg(struct dw_mci *host,
+ void *desc,
+ struct mmc_data *data,
+ struct page *page, int sector_offset, bool cmdq_enabled)
+{
+ return exynos_mmc_fmp_cfg(host, desc, data, page, sector_offset, cmdq_enabled);
+}
+
+static int dw_mci_exynos_crypto_engine_clear(struct dw_mci *host, void *desc, bool cmdq_enabled)
+{
+ return exynos_mmc_fmp_clear(host, desc, cmdq_enabled);
+}
+
+static int dw_mci_exynos_access_control_get_dev(struct dw_mci *host)
+{
+ return exynos_mmc_smu_get_dev(host);
+}
+
+static int dw_mci_exynos_access_control_sec_cfg(struct dw_mci *host)
+{
+ return exynos_mmc_smu_sec_cfg(host);
+}
+
+static int dw_mci_exynos_access_control_init(struct dw_mci *host)
+{
+ return exynos_mmc_smu_init(host);
+}
+
+static int dw_mci_exynos_access_control_abort(struct dw_mci *host)
+{
+ return exynos_mmc_smu_abort(host);
+}
+
+static int dw_mci_exynos_access_control_resume(struct dw_mci *host)
+{
+ return exynos_mmc_smu_resume(host);
+}
+#endif
+
static const struct dw_mci_drv_data exynos_drv_data = {
- .caps = exynos_dwmmc_caps,
+ .caps = exynos_dwmmc_caps,
.num_caps = ARRAY_SIZE(exynos_dwmmc_caps),
- .init = dw_mci_exynos_priv_init,
- .set_ios = dw_mci_exynos_set_ios,
- .parse_dt = dw_mci_exynos_parse_dt,
- .execute_tuning = dw_mci_exynos_execute_tuning,
- .prepare_hs400_tuning = dw_mci_exynos_prepare_hs400_tuning,
+ .init = dw_mci_exynos_priv_init,
+ .set_ios = dw_mci_exynos_set_ios,
+ .parse_dt = dw_mci_exynos_parse_dt,
+ .execute_tuning = dw_mci_exynos_execute_tuning,
+ .hwacg_control = dw_mci_card_int_hwacg_ctrl,
+ .misc_control = dw_mci_exynos_misc_control,
+#ifdef CONFIG_MMC_DW_EXYNOS_FMP
+ .crypto_engine_cfg = dw_mci_exynos_crypto_engine_cfg,
+ .crypto_engine_clear = dw_mci_exynos_crypto_engine_clear,
+ .access_control_get_dev = dw_mci_exynos_access_control_get_dev,
+ .access_control_sec_cfg = dw_mci_exynos_access_control_sec_cfg,
+ .access_control_init = dw_mci_exynos_access_control_init,
+ .access_control_abort = dw_mci_exynos_access_control_abort,
+ .access_control_resume = dw_mci_exynos_access_control_resume,
+#endif
+
+ .ssclk_control = dw_mci_ssclk_control,
};
static const struct of_device_id dw_mci_exynos_match[] = {
- { .compatible = "samsung,exynos4412-dw-mshc",
- .data = &exynos_drv_data, },
- { .compatible = "samsung,exynos5250-dw-mshc",
- .data = &exynos_drv_data, },
- { .compatible = "samsung,exynos5420-dw-mshc",
- .data = &exynos_drv_data, },
- { .compatible = "samsung,exynos5420-dw-mshc-smu",
- .data = &exynos_drv_data, },
- { .compatible = "samsung,exynos7-dw-mshc",
- .data = &exynos_drv_data, },
- { .compatible = "samsung,exynos7-dw-mshc-smu",
- .data = &exynos_drv_data, },
+ {.compatible = "samsung,exynos-dw-mshc",
+ .data = &exynos_drv_data,},
{},
};
+
MODULE_DEVICE_TABLE(of, dw_mci_exynos_match);
static int dw_mci_exynos_probe(struct platform_device *pdev)
static const struct dev_pm_ops dw_mci_exynos_pmops = {
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
- dw_mci_exynos_runtime_resume,
- NULL)
- .resume_noirq = dw_mci_exynos_resume_noirq,
+ SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
+ dw_mci_exynos_runtime_resume,
+ NULL)
+ .resume_noirq = dw_mci_exynos_resume_noirq,
.thaw_noirq = dw_mci_exynos_resume_noirq,
.restore_noirq = dw_mci_exynos_resume_noirq,
};
static struct platform_driver dw_mci_exynos_pltfm_driver = {
- .probe = dw_mci_exynos_probe,
- .remove = dw_mci_exynos_remove,
- .driver = {
- .name = "dwmmc_exynos",
- .of_match_table = dw_mci_exynos_match,
- .pm = &dw_mci_exynos_pmops,
- },
+ .probe = dw_mci_exynos_probe,
+ .remove = dw_mci_exynos_remove,
+ .driver = {
+ .name = "dwmmc_exynos",
+ .of_match_table = dw_mci_exynos_match,
+ .pm = &dw_mci_exynos_pmops,
+ },
};
module_platform_driver(dw_mci_exynos_pltfm_driver);
#include <linux/mmc/sdio.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mmc/slot-gpio.h>
+#include <linux/smc.h>
+
+#include <soc/samsung/exynos-pm.h>
+#include <soc/samsung/exynos-powermode.h>
+#include <soc/samsung/exynos-cpupm.h>
#include "dw_mmc.h"
+#include "dw_mmc-exynos.h"
+#include "../core/queue.h"
/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
#define DW_MCI_DMA_THRESHOLD 16
#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
-#define DW_MCI_FREQ_MIN 100000 /* unit: HZ */
+#define DW_MCI_FREQ_MIN 400000 /* unit: HZ */
-#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
- SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
- SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
- SDMMC_IDMAC_INT_TI)
+/* Each descriptor can transfer up to 4KB of data in chained mode */
+#define DW_MCI_DESC_DATA_LENGTH 0x1000
#define DESC_RING_BUF_SZ PAGE_SIZE
-struct idmac_desc_64addr {
- u32 des0; /* Control Descriptor */
-#define IDMAC_OWN_CLR64(x) \
- !((x) & cpu_to_le32(IDMAC_DES0_OWN))
+static bool dw_mci_reset(struct dw_mci *host);
+
+static int dw_mci_card_busy(struct mmc_host *mmc);
+bool dw_mci_fifo_reset(struct device *dev, struct dw_mci *host);
+void dw_mci_ciu_reset(struct device *dev, struct dw_mci *host);
+static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
+extern int fmp_mmc_map_sg(struct dw_mci *host, struct idmac_desc_64addr *desc, int idx,
+ uint32_t sector_key, uint32_t sector, struct mmc_data *data);
+static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq);
+static struct workqueue_struct *pm_workqueue;
+#if defined(CONFIG_MMC_DW_DEBUG)
+static struct dw_mci_debug_data dw_mci_debug __cacheline_aligned;
+
+/* Add sysfs for read cmd_logs */
+static ssize_t dw_mci_debug_log_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ ssize_t total_len = 0;
+ int j = 0, k = 0;
+ struct dw_mci_cmd_log *cmd_log;
+ unsigned int offset;
+ struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
+ struct dw_mci *host = dw_mci_debug.host[mmc->index];
+ /*
+	 * Print the most recent 14 cmd_log entries, oldest first.
+ */
+ if (host->debug_info->en_logging & DW_MCI_DEBUG_ON_CMD) {
+ offset = atomic_read(&host->debug_info->cmd_log_count) - 13;
+ offset &= DWMCI_LOG_MAX - 1;
+ total_len += snprintf(buf, PAGE_SIZE, "HOST%1d\n", mmc->index);
+ buf += (sizeof(char) * 6);
+ cmd_log = host->debug_info->cmd_log;
+ for (j = 0; j < 14; j++) {
+ total_len += snprintf(buf + (sizeof(char) * 71 * j) +
+ (sizeof(char) * (2 * k + 6 * (k + 1))), PAGE_SIZE,
+ "%04d:%2d,0x%08x,%04d,%016llu,%016llu,%02x,%04x,%03d.\n",
+ offset,
+ cmd_log[offset].cmd, cmd_log[offset].arg,
+ cmd_log[offset].data_size, cmd_log[offset].send_time,
+ cmd_log[offset].done_time, cmd_log[offset].seq_status,
+ cmd_log[offset].rint_sts,
+ cmd_log[offset].status_count);
+ offset++;
+ }
+ total_len += snprintf(buf + (sizeof(char) * 2), PAGE_SIZE, "\n\n");
+ k++;
+ }
- u32 des1; /* Reserved */
+ return total_len;
+}
- u32 des2; /*Buffer sizes */
-#define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
- ((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
- ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))
+static ssize_t dw_mci_debug_log_control(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ int enable = 0;
+ int ret = 0;
+ struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
+ struct dw_mci *host = dw_mci_debug.host[mmc->index];
- u32 des3; /* Reserved */
+ ret = kstrtoint(buf, 0, &enable);
+ if (ret)
+ goto out;
+ host->debug_info->en_logging = enable;
+ printk("%s: en_logging is %d.\n",
+ mmc_hostname(host->slot->mmc), host->debug_info->en_logging);
+ out:
+ return len;
+}
- u32 des4; /* Lower 32-bits of Buffer Address Pointer 1*/
- u32 des5; /* Upper 32-bits of Buffer Address Pointer 1*/
+static DEVICE_ATTR(dwmci_debug, 0644, dw_mci_debug_log_show, dw_mci_debug_log_control);
- u32 des6; /* Lower 32-bits of Next Descriptor Address */
- u32 des7; /* Upper 32-bits of Next Descriptor Address */
-};
+/*
+ * new_cmd : has to be true Only send_command.(except CMD13)
+ * flags :
+ * 0x1 : send_cmd : start_command(all)
+ * 0x2 : resp(CD) : set done_time without data case
+ * 0x4 : data_done(DTO) : set done_time with data case
+ * 0x8 : error interrupt occurs : set rint_sts read from RINTSTS
+ */
+static void dw_mci_debug_cmd_log(struct mmc_command *cmd, struct dw_mci *host,
+ bool new_cmd, u8 flags, u32 rintsts)
+{
+ int cpu = raw_smp_processor_id();
+ unsigned int count;
+ struct dw_mci_cmd_log *cmd_log;
-struct idmac_desc {
- __le32 des0; /* Control Descriptor */
-#define IDMAC_DES0_DIC BIT(1)
-#define IDMAC_DES0_LD BIT(2)
-#define IDMAC_DES0_FD BIT(3)
-#define IDMAC_DES0_CH BIT(4)
-#define IDMAC_DES0_ER BIT(5)
-#define IDMAC_DES0_CES BIT(30)
-#define IDMAC_DES0_OWN BIT(31)
+ if (!host->debug_info || !(host->debug_info->en_logging & DW_MCI_DEBUG_ON_CMD))
+ return;
- __le32 des1; /* Buffer sizes */
-#define IDMAC_SET_BUFFER1_SIZE(d, s) \
- ((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))
+ cmd_log = host->debug_info->cmd_log;
- __le32 des2; /* buffer 1 physical address */
+ if (!new_cmd) {
+ count = atomic_read(&host->debug_info->cmd_log_count) & (DWMCI_LOG_MAX - 1);
+ if (flags & DW_MCI_FLAG_SEND_CMD) /* CMD13 */
+ cmd_log[count].status_count++;
+ if (flags & DW_MCI_FLAG_CD) {
+ cmd_log[count].seq_status |= DW_MCI_FLAG_CD;
+ cmd_log[count].done_time = cpu_clock(cpu);
+ }
+ if (flags & DW_MCI_FLAG_DTO) {
+ cmd_log[count].seq_status |= DW_MCI_FLAG_DTO;
+ cmd_log[count].done_time = cpu_clock(cpu);
+ }
+ if (flags & DW_MCI_FLAG_ERROR) {
+ cmd_log[count].seq_status |= DW_MCI_FLAG_ERROR;
+ cmd_log[count].rint_sts |= (rintsts & 0xFFFF);
+ }
+ } else {
+ count = atomic_inc_return(&host->debug_info->cmd_log_count) & (DWMCI_LOG_MAX - 1);
+ cmd_log[count].cmd = cmd->opcode;
+ cmd_log[count].arg = cmd->arg;
+ if (cmd->data)
+ cmd_log[count].data_size = cmd->data->blocks;
+ else
+ cmd_log[count].data_size = 0;
- __le32 des3; /* buffer 2 physical address */
-};
+ cmd_log[count].send_time = cpu_clock(cpu);
-/* Each descriptor can transfer up to 4KB of data in chained mode */
-#define DW_MCI_DESC_DATA_LENGTH 0x1000
+ cmd_log[count].done_time = 0x0;
+ cmd_log[count].seq_status = DW_MCI_FLAG_SEND_CMD;
+ if (!(flags & DW_MCI_FLAG_SEND_CMD))
+ cmd_log[count].seq_status |= DW_MCI_FLAG_NEW_CMD_ERR;
+
+ cmd_log[count].rint_sts = 0x0;
+ cmd_log[count].status_count = 0;
+ }
+}
+
+static void dw_mci_debug_req_log(struct dw_mci *host, struct mmc_request *mrq,
+ enum dw_mci_req_log_state log_state, enum dw_mci_state state)
+{
+ int cpu = raw_smp_processor_id();
+ unsigned int count;
+ struct dw_mci_req_log *req_log;
+
+ if (!host->debug_info || !(host->debug_info->en_logging & DW_MCI_DEBUG_ON_REQ))
+ return;
+
+ req_log = host->debug_info->req_log;
+
+ count = atomic_inc_return(&host->debug_info->req_log_count)
+ & (DWMCI_REQ_LOG_MAX - 1);
+ if (log_state == STATE_REQ_START) {
+ req_log[count].info0 = mrq->cmd->opcode;
+ req_log[count].info1 = mrq->cmd->arg;
+ if (mrq->data) {
+ req_log[count].info2 = (u32) mrq->data->blksz;
+ req_log[count].info3 = (u32) mrq->data->blocks;
+ } else {
+ req_log[count].info2 = 0;
+ req_log[count].info3 = 0;
+ }
+ } else {
+ req_log[count].info0 = host->cmd_status;
+ req_log[count].info1 = host->data_status;
+ req_log[count].info2 = 0;
+ req_log[count].info3 = 0;
+ }
+ req_log[count].log_state = log_state;
+ req_log[count].pending_events = host->pending_events;
+ req_log[count].completed_events = host->completed_events;
+ req_log[count].timestamp = cpu_clock(cpu);
+ req_log[count].state = state;
+}
+
+static void dw_mci_debug_init(struct dw_mci *host)
+{
+ unsigned int host_index;
+ unsigned int info_index;
+
+ host_index = dw_mci_debug.host_count++;
+ if (host_index < DWMCI_DBG_NUM_HOST) {
+ dw_mci_debug.host[host_index] = host;
+ if (DWMCI_DBG_MASK_INFO & DWMCI_DBG_BIT_HOST(host_index)) {
+ static atomic_t temp_cmd_log_count = ATOMIC_INIT(-1);
+ static atomic_t temp_req_log_count = ATOMIC_INIT(-1);
+ int sysfs_err = 0;
+
+ info_index = dw_mci_debug.info_count++;
+ dw_mci_debug.info_index[host_index] = info_index;
+ host->debug_info = &dw_mci_debug.debug_info[info_index];
+ host->debug_info->en_logging = DW_MCI_DEBUG_ON_CMD | DW_MCI_DEBUG_ON_REQ;
+ host->debug_info->cmd_log_count = temp_cmd_log_count;
+ host->debug_info->req_log_count = temp_req_log_count;
+
+ sysfs_err = sysfs_create_file(&(host->slot->mmc->class_dev.kobj),
+ &(dev_attr_dwmci_debug.attr));
+ pr_info("%s: create debug_log sysfs : %s.....\n", __func__,
+ sysfs_err ? "failed" : "successed");
+ dev_info(host->dev, "host %d debug On\n", host_index);
+ } else {
+ dw_mci_debug.info_index[host_index] = 0xFF;
+ }
+ }
+}
+#else
+static inline int dw_mci_debug_cmd_log(struct mmc_command *cmd,
+ struct dw_mci *host, bool new_cmd, u8 flags, u32 rintsts)
+{
+ return 0;
+}
+
+static inline int dw_mci_debug_req_log(struct dw_mci *host,
+ struct mmc_request *mrq, enum dw_mci_req_log_state log_state,
+ enum dw_mci_state state)
+{
+ return 0;
+}
+
+static inline int dw_mci_debug_init(struct dw_mci *host)
+{
+ return 0;
+}
+#endif /* defined (CONFIG_MMC_DW_DEBUG) */
+
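+/*
+ * PM QoS handling: dw_mci_qos_get() raises the DVFS floor to
+ * pdata->qos_dvfs_level for the duration of a request; dw_mci_qos_put()
+ * drops it back to 0 from a workqueue ~5 ms later, so back-to-back requests
+ * don't bounce the QoS lock.
+ */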
+static void dw_mci_qos_work(struct work_struct *work)
+{
+ struct dw_mci *host = container_of(work, struct dw_mci, qos_work.work);
+
+ pm_qos_update_request(&host->pm_qos_lock, 0);
+}
+
+static void dw_mci_qos_get(struct dw_mci *host)
+{
+ if (delayed_work_pending(&host->qos_work))
+ cancel_delayed_work_sync(&host->qos_work);
+
+ pm_qos_update_request(&host->pm_qos_lock, host->pdata->qos_dvfs_level);
+}
+
+static void dw_mci_qos_put(struct dw_mci *host)
+{
+ queue_delayed_work(pm_workqueue, &host->qos_work, msecs_to_jiffies(5));
+}
+
+/* Add sysfs for argos */
+static ssize_t dw_mci_transferred_cnt_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci *host = slot->host;
+
+ return sprintf(buf, "%u\n", host->transferred_cnt);
+}
+
+DEVICE_ATTR(trans_count, 0444, dw_mci_transferred_cnt_show, NULL);
+
+static void dw_mci_transferred_cnt_init(struct dw_mci *host, struct mmc_host *mmc)
+{
+ int sysfs_err = 0;
+
+ sysfs_err = sysfs_create_file(&(mmc->class_dev.kobj), &(dev_attr_trans_count.attr));
+ pr_info("%s: trans_count: %s.....\n", __func__, sysfs_err ? "failed" : "successed");
+}
+
+static int dw_mci_get_cd(struct mmc_host *mmc);
#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_command *stop;
- struct mmc_data *data;
+ struct mmc_data *data;
/* Make sure we get a consistent snapshot */
spin_lock_bh(&slot->host->lock);
}
static const struct file_operations dw_mci_req_fops = {
- .owner = THIS_MODULE,
- .open = dw_mci_req_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+ .owner = THIS_MODULE,
+ .open = dw_mci_req_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
static int dw_mci_regs_show(struct seq_file *s, void *v)
}
static const struct file_operations dw_mci_regs_fops = {
- .owner = THIS_MODULE,
- .open = dw_mci_regs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+ .owner = THIS_MODULE,
+ .open = dw_mci_regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
- struct mmc_host *mmc = slot->mmc;
+ struct mmc_host *mmc = slot->mmc;
struct dw_mci *host = slot->host;
struct dentry *root;
struct dentry *node;
if (!root)
return;
- node = debugfs_create_file("regs", 0400, root, host,
- &dw_mci_regs_fops);
+ node = debugfs_create_file("regs", 0400, root, host, &dw_mci_regs_fops);
if (!node)
goto err;
- node = debugfs_create_file("req", 0400, root, slot,
- &dw_mci_req_fops);
+ node = debugfs_create_file("req", 0400, root, slot, &dw_mci_req_fops);
if (!node)
goto err;
- node = debugfs_create_u32("state", 0400, root, (u32 *)&host->state);
+ node = debugfs_create_u32("state", 0400, root, (u32 *) &host->state);
if (!node)
goto err;
- node = debugfs_create_x32("pending_events", 0400, root,
- (u32 *)&host->pending_events);
+ node = debugfs_create_x32("pending_events", 0400, root, (u32 *) &host->pending_events);
if (!node)
goto err;
node = debugfs_create_x32("completed_events", 0400, root,
- (u32 *)&host->completed_events);
+ (u32 *) &host->completed_events);
if (!node)
goto err;
return;
-err:
+ err:
dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
-#endif /* defined(CONFIG_DEBUG_FS) */
+#endif /* defined(CONFIG_DEBUG_FS) */
-static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
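+/*
+ * Mask all controller interrupts: clear the global INT_ENABLE bit in
+ * CTRL and zero INTMASK, saving the old mask through @int_mask so that
+ * dw_mci_enable_interrupt() can restore it afterwards.
+ */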
+u32 dw_mci_disable_interrupt(struct dw_mci *host, unsigned int *int_mask)
{
u32 ctrl;
ctrl = mci_readl(host, CTRL);
+ ctrl &= ~(SDMMC_CTRL_INT_ENABLE);
+ mci_writel(host, CTRL, ctrl);
+
+ *int_mask = mci_readl(host, INTMASK);
+
+ mci_writel(host, INTMASK, 0);
+
+ return ctrl;
+}
+
+void dw_mci_enable_interrupt(struct dw_mci *host, unsigned int int_mask)
+{
+ unsigned int ctrl;
+
+ mci_writel(host, INTMASK, int_mask);
+
+ ctrl = mci_readl(host, CTRL);
+ mci_writel(host, CTRL, ctrl | SDMMC_CTRL_INT_ENABLE);
+}
+
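+/*
+ * Reset the requested block(s): the reset is issued with interrupts
+ * masked and the CLKSEL Rx sample timing (bits [7:6] and [2:0]) forced
+ * to 0, and the saved CLKSEL value is restored once the reset bits
+ * have cleared.
+ */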
+static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
+{
+ u32 ctrl;
+ unsigned int int_mask = 0;
+ u32 clksel_saved = 0x0;
+ bool ret = false;
+
+ /* Interrupt disable */
+ ctrl = dw_mci_disable_interrupt(host, &int_mask);
+
+ /* set Rx timing to 0 */
+ clksel_saved = mci_readl(host, CLKSEL);
+ mci_writel(host, CLKSEL, clksel_saved & ~(0x3 << 6 | 0x7));
+
+ /* Reset */
ctrl |= reset;
mci_writel(host, CTRL, ctrl);
+ /* All interrupt clear */
+ mci_writel(host, RINTSTS, 0xFFFFFFFF);
+
+ /* Interrupt enable */
+ dw_mci_enable_interrupt(host, int_mask);
+
/* wait till resets clear */
- if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl,
- !(ctrl & reset),
- 1, 500 * USEC_PER_MSEC)) {
- dev_err(host->dev,
- "Timeout resetting block (ctrl reset %#x)\n",
- ctrl & reset);
- return false;
+ if (!readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl,
+ !(ctrl & reset), 1, 500 * USEC_PER_MSEC)) {
+ ret = true;
}
- return true;
+ if (!ret)
+ dev_err(host->dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
+
+ /* restore Rx timing */
+ mci_writel(host, CLKSEL, clksel_saved);
+
+ return ret;
}
static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
* ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
* expected.
*/
- if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
- !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
+ if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) && !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
status,
!(status & SDMMC_STATUS_BUSY),
{
struct dw_mci *host = slot->host;
unsigned int cmd_status = 0;
+ int try = 50;
mci_writel(host, CMDARG, arg);
- wmb(); /* drain writebuffer */
- dw_mci_wait_while_busy(host, cmd);
+ wmb(); /* drain writebuffer */
mci_writel(host, CMD, SDMMC_CMD_START | cmd);
- if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status,
- !(cmd_status & SDMMC_CMD_START),
- 1, 500 * USEC_PER_MSEC))
- dev_err(&slot->mmc->class_dev,
- "Timeout sending command (cmd %#x arg %#x status %#x)\n",
- cmd, arg, cmd_status);
+ do {
+ if (!readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status,
+ !(cmd_status & SDMMC_CMD_START),
+ 1, 10 * USEC_PER_MSEC))
+ return;
+
+ dw_mci_ctrl_reset(host, SDMMC_CTRL_RESET);
+ mci_writel(host, CMD, SDMMC_CMD_START | cmd);
+ } while (--try);
+
+ dev_err(&slot->mmc->class_dev,
+ "Timeout sending command (cmd %#x arg %#x status %#x)\n", cmd, arg, cmd_status);
+}
+
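+/*
+ * Wait for DAT[0] to go idle before starting a new request: poll the
+ * status for up to 100 ms, and on timeout reset the FIFO and CIU, kick
+ * the card clock, and retry (up to six times). CMD13 status polls are
+ * exempt since the core issues them periodically anyway.
+ */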
+static bool dw_mci_wait_data_busy(struct dw_mci *host, struct mmc_request *mrq)
+{
+ u32 status;
+ struct dw_mci_slot *slot = host->slot;
+ int try = 6;
+ u32 clkena;
+ bool ret = false;
+
+ do {
+ if (!readl_poll_timeout_atomic(host->regs + SDMMC_STATUS, status,
+ !(status & SDMMC_STATUS_BUSY),
+ 10, 100 * USEC_PER_MSEC)) {
+ ret = true;
+ goto out;
+ }
+
+ /* the card is polled with CMD13 at least once per second */
+ if (mrq->cmd->opcode == MMC_SEND_STATUS)
+ return true;
+
+ dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
+ dw_mci_ctrl_reset(host, SDMMC_CTRL_RESET);
+ /* after a CTRL reset the clock value must be supplied to the CIU again */
+ if (host->slot) {
+ /* Disable low power mode */
+ clkena = mci_readl(host, CLKENA);
+ clkena &= ~((SDMMC_CLKEN_LOW_PWR) << slot->id);
+ mci_writel(host, CLKENA, clkena);
+
+ mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
+ }
+ } while (--try);
+ out:
+ if (host->slot) {
+ if (!ret)
+ dev_err(host->dev, "DAT[0] line is still busy\n");
+
+ /* enable clock */
+ mci_writel(host, CLKENA, ((SDMMC_CLKEN_ENABLE | SDMMC_CLKEN_LOW_PWR) << slot->id));
+
+ /* inform CIU */
+ mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
+ }
+
+ return ret;
+}
+
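+/*
+ * Latch new CLKDIV/CLKENA settings into the CIU by issuing
+ * SDMMC_CMD_UPD_CLK with interrupts masked, resetting the controller
+ * and retrying (up to ten times) if the start bit does not clear; an
+ * HLE status ends the loop with an error message instead.
+ */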
+static void dw_mci_update_clock(struct dw_mci_slot *slot)
+{
+ struct dw_mci *host = slot->host;
+ int retry = 10;
+ unsigned int int_mask = 0;
+ unsigned int cmd_status = 0;
+
+ dw_mci_disable_interrupt(host, &int_mask);
+
+ do {
+ wmb();
+ mci_writel(host, CMD, SDMMC_CMD_START | SDMMC_CMD_UPD_CLK);
+
+ if (!readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status,
+ !(cmd_status & SDMMC_CMD_START),
+ 1, 1 * USEC_PER_MSEC)) {
+ goto out;
+ } else {
+ /* reset the controller because a command is stuck */
+ if (mci_readl(host, RINTSTS) & SDMMC_INT_HLE) {
+ mci_writel(host, RINTSTS, SDMMC_INT_HLE);
+ break;
+ }
+ }
+
+ dw_mci_ctrl_reset(host, SDMMC_CTRL_RESET);
+ } while (--retry);
+
+ dev_err(&slot->mmc->class_dev, "Timeout updating command (status %#x)\n", cmd_status);
+ out:
+ /* recover interrupt mask after updating clock */
+ dw_mci_enable_interrupt(host, int_mask);
+}
+
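+/*
+ * True for commands that are safe to issue while DAT[0] is busy:
+ * the stop/abort class commands and the SDIO CCCR I/O-abort write.
+ */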
+static inline bool dw_mci_stop_abort_cmd(struct mmc_command *cmd)
+{
+ u32 op = cmd->opcode;
+
+ if ((op == MMC_STOP_TRANSMISSION) ||
+ (op == MMC_GO_IDLE_STATE) ||
+ (op == MMC_GO_INACTIVE_STATE) ||
+ ((op == SD_IO_RW_DIRECT) && (cmd->arg & 0x80000000) &&
+ ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
+ return true;
+ return false;
}
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
if (cmd->opcode == MMC_STOP_TRANSMISSION ||
cmd->opcode == MMC_GO_IDLE_STATE ||
cmd->opcode == MMC_GO_INACTIVE_STATE ||
- (cmd->opcode == SD_IO_RW_DIRECT &&
- ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
+ (cmd->opcode == SD_IO_RW_DIRECT && ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
cmdr |= SDMMC_CMD_STOP;
else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
if (cmd->opcode == SD_SWITCH_VOLTAGE) {
u32 clk_en_a;
- /* Special bit makes CMD11 not die */
- cmdr |= SDMMC_CMD_VOLT_SWITCH;
-
/* Change state to continue to handle CMD11 weirdness */
WARN_ON(slot->host->state != STATE_SENDING_CMD);
slot->host->state = STATE_SENDING_CMD11;
clk_en_a = mci_readl(host, CLKENA);
clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
mci_writel(host, CLKENA, clk_en_a);
- mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
- SDMMC_CMD_PRV_DAT_WAIT, 0);
+ mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
}
if (cmd->flags & MMC_RSP_PRESENT) {
cmdr == MMC_READ_MULTIPLE_BLOCK ||
cmdr == MMC_WRITE_BLOCK ||
cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
- cmdr == MMC_SEND_TUNING_BLOCK ||
- cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
+ cmdr == MMC_SEND_TUNING_BLOCK || cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
stop->opcode = MMC_STOP_TRANSMISSION;
stop->arg = 0;
stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
} else if (cmdr == SD_IO_RW_EXTENDED) {
stop->opcode = SD_IO_RW_DIRECT;
stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
- ((cmd->arg >> 28) & 0x7);
+ ((cmd->arg >> 28) & 0x7);
stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
} else {
return 0;
}
- cmdr = stop->opcode | SDMMC_CMD_STOP |
- SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
+ cmdr = stop->opcode | SDMMC_CMD_STOP | SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags))
cmdr |= SDMMC_CMD_USE_HOLD_REG;
*/
spin_lock_irqsave(&host->irq_lock, irqflags);
if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
- mod_timer(&host->cto_timer,
- jiffies + msecs_to_jiffies(cto_ms) + 1);
+ mod_timer(&host->cto_timer, jiffies + msecs_to_jiffies(cto_ms) + 1);
spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
-static void dw_mci_start_command(struct dw_mci *host,
- struct mmc_command *cmd, u32 cmd_flags)
+static void dw_mci_start_command(struct dw_mci *host, struct mmc_command *cmd, u32 cmd_flags)
{
host->cmd = cmd;
- dev_vdbg(host->dev,
- "start command: ARGR=0x%08x CMDR=0x%08x\n",
- cmd->arg, cmd_flags);
+ dev_vdbg(host->dev, "start command: ARGR=0x%08x CMDR=0x%08x\n", cmd->arg, cmd_flags);
mci_writel(host, CMDARG, cmd->arg);
- wmb(); /* drain writebuffer */
+ wmb(); /* drain writebuffer */
dw_mci_wait_while_busy(host, cmd_flags);
mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
if (host->using_dma) {
host->dma_ops->stop(host);
host->dma_ops->cleanup(host);
+ host->dma_ops->reset(host);
}
/* Data transfer was stopped by the interrupt handler */
struct mmc_data *data = host->data;
if (data && data->host_cookie == COOKIE_MAPPED) {
- dma_unmap_sg(host->dev,
- data->sg,
- data->sg_len,
- mmc_get_dma_dir(data));
+ dma_unmap_sg(host->dev, data->sg, data->sg_len, mmc_get_dma_dir(data));
data->host_cookie = COOKIE_UNMAPPED;
}
}
/* Disable and reset the IDMAC interface */
temp = mci_readl(host, CTRL);
temp &= ~SDMMC_CTRL_USE_IDMAC;
- temp |= SDMMC_CTRL_DMA_RESET;
mci_writel(host, CTRL, temp);
+ /* reset the IDMAC interface */
+ dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
+
/* Stop the IDMAC running */
temp = mci_readl(host, BMOD);
temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
dev_vdbg(host->dev, "DMA complete\n");
- if ((host->use_dma == TRANS_MODE_EDMAC) &&
- data && (data->flags & MMC_DATA_READ))
+ if ((host->use_dma == TRANS_MODE_EDMAC) && data && (data->flags & MMC_DATA_READ))
/* Invalidate cache after read */
dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc),
- data->sg,
- data->sg_len,
- DMA_FROM_DEVICE);
+ data->sg, data->sg_len, DMA_FROM_DEVICE);
host->dma_ops->cleanup(host);
if (host->dma_64bit_address == 1) {
struct idmac_desc_64addr *p;
/* Number of descriptors in the ring buffer */
- host->ring_size =
- DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);
+ host->ring_size = host->desc_sz * DESC_RING_BUF_SZ /
+ sizeof(struct idmac_desc_64addr);
/* Forward link the descriptor list */
- for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
- i++, p++) {
+ for (i = 0, p = host->sg_cpu; i < host->ring_size * MMC_DW_IDMAC_MULTIPLIER - 1;
+ i++, p++) {
p->des6 = (host->sg_dma +
- (sizeof(struct idmac_desc_64addr) *
- (i + 1))) & 0xffffffff;
+ (sizeof(struct idmac_desc_64addr) * (i + 1))) & 0xffffffff;
- p->des7 = (u64)(host->sg_dma +
- (sizeof(struct idmac_desc_64addr) *
- (i + 1))) >> 32;
+ p->des7 = (u64) (host->sg_dma +
+ (sizeof(struct idmac_desc_64addr) * (i + 1))) >> 32;
/* Initialize reserved and buffer size fields to "0" */
p->des0 = 0;
p->des1 = 0;
/* Set the last descriptor as the end-of-ring descriptor */
p->des6 = host->sg_dma & 0xffffffff;
- p->des7 = (u64)host->sg_dma >> 32;
+ p->des7 = (u64) host->sg_dma >> 32;
p->des0 = IDMAC_DES0_ER;
} else {
struct idmac_desc *p;
/* Number of descriptors in the ring buffer */
- host->ring_size =
- DESC_RING_BUF_SZ / sizeof(struct idmac_desc);
+ host->ring_size = host->desc_sz * DESC_RING_BUF_SZ / sizeof(struct idmac_desc);
/* Forward link the descriptor list */
- for (i = 0, p = host->sg_cpu;
- i < host->ring_size - 1;
- i++, p++) {
- p->des3 = cpu_to_le32(host->sg_dma +
- (sizeof(struct idmac_desc) * (i + 1)));
+ for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) {
+ p->des3 = cpu_to_le32(host->sg_dma + (sizeof(struct idmac_desc) * (i + 1)));
p->des0 = 0;
p->des1 = 0;
}
/* Mask out interrupts - get Tx & Rx complete only */
mci_writel(host, IDSTS64, IDMAC_INT_CLR);
mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
- SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
+ SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
/* Set the descriptor base address */
mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
- mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);
+ mci_writel(host, DBADDRU, (u64) host->sg_dma >> 32);
} else {
/* Mask out interrupts - get Tx & Rx complete only */
mci_writel(host, IDSTS, IDMAC_INT_CLR);
mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
- SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
+ SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
/* Set the descriptor base address */
mci_writel(host, DBADDR, host->sg_dma);
}
static inline int dw_mci_prepare_desc64(struct dw_mci *host,
- struct mmc_data *data,
- unsigned int sg_len)
+ struct mmc_data *data, unsigned int sg_len)
{
unsigned int desc_len;
struct idmac_desc_64addr *desc_first, *desc_last, *desc;
u64 mem_addr = sg_dma_address(&data->sg[i]);
- for ( ; length ; desc++) {
+ for (; length; desc++) {
desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
- length : DW_MCI_DESC_DATA_LENGTH;
+ length : DW_MCI_DESC_DATA_LENGTH;
length -= desc_len;
* ops and CPU's read ops are asynchronous.
*/
if (readl_poll_timeout_atomic(&desc->des0, val,
- !(val & IDMAC_DES0_OWN),
- 10, 100 * USEC_PER_MSEC))
+ !(val & IDMAC_DES0_OWN),
+ 10, 100 * USEC_PER_MSEC))
goto err_own_bit;
/*
* Set the OWN bit and disable interrupts
* for this descriptor
*/
- desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
- IDMAC_DES0_CH;
+ desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
/* Buffer length */
IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);
desc_last->des0 |= IDMAC_DES0_LD;
return 0;
-err_own_bit:
+ err_own_bit:
/* restore the descriptor chain as it's polluted */
dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
return -EINVAL;
}
-
static inline int dw_mci_prepare_desc32(struct dw_mci *host,
- struct mmc_data *data,
- unsigned int sg_len)
+ struct mmc_data *data, unsigned int sg_len)
{
unsigned int desc_len;
struct idmac_desc *desc_first, *desc_last, *desc;
u32 mem_addr = sg_dma_address(&data->sg[i]);
- for ( ; length ; desc++) {
+ for (; length; desc++) {
desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
- length : DW_MCI_DESC_DATA_LENGTH;
+ length : DW_MCI_DESC_DATA_LENGTH;
length -= desc_len;
*/
if (readl_poll_timeout_atomic(&desc->des0, val,
IDMAC_OWN_CLR64(val),
- 10,
- 100 * USEC_PER_MSEC))
+ 10, 100 * USEC_PER_MSEC))
goto err_own_bit;
/*
* Set the OWN bit and disable interrupts
* for this descriptor
*/
- desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
- IDMAC_DES0_DIC |
- IDMAC_DES0_CH);
+ desc->des0 = cpu_to_le32(IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH);
/* Buffer length */
IDMAC_SET_BUFFER1_SIZE(desc, desc_len);
desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);
/* Set last descriptor */
- desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
- IDMAC_DES0_DIC));
+ desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH | IDMAC_DES0_DIC));
desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
return 0;
-err_own_bit:
+ err_own_bit:
/* restore the descriptor chain as it's polluted */
dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
/* Start it running */
mci_writel(host, PLDMND, 1);
-out:
+ out:
return ret;
}
static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
.init = dw_mci_idmac_init,
.start = dw_mci_idmac_start_dma,
+ .reset = dw_mci_idmac_reset,
.stop = dw_mci_idmac_stop_dma,
.complete = dw_mci_dmac_complete_dma,
.cleanup = dw_mci_dma_cleanup,
dmaengine_terminate_async(host->dms->ch);
}
-static int dw_mci_edmac_start_dma(struct dw_mci *host,
- unsigned int sg_len)
+static int dw_mci_edmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
struct dma_slave_config cfg;
struct dma_async_tx_descriptor *desc = NULL;
struct scatterlist *sgl = host->data->sg;
- const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
+ const u32 mszs[] = { 1, 4, 8, 16, 32, 64, 128, 256 };
u32 sg_elems = host->data->sg_len;
u32 fifoth_val;
u32 fifo_offset = host->fifo_reg - host->regs;
}
desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
- sg_len, cfg.direction,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ sg_len, cfg.direction, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
dev_err(host->dev, "Can't prepare slave sg.\n");
return -EBUSY;
/* Flush cache before write */
if (host->data->flags & MMC_DATA_WRITE)
- dma_sync_sg_for_device(mmc_dev(host->slot->mmc), sgl,
- sg_elems, DMA_TO_DEVICE);
+ dma_sync_sg_for_device(mmc_dev(host->slot->mmc), sgl, sg_elems, DMA_TO_DEVICE);
dma_async_issue_pending(host->dms->ch);
.cleanup = dw_mci_dma_cleanup,
};
-static int dw_mci_pre_dma_transfer(struct dw_mci *host,
- struct mmc_data *data,
- int cookie)
+static int dw_mci_pre_dma_transfer(struct dw_mci *host, struct mmc_data *data, int cookie)
{
struct scatterlist *sg;
+ struct dw_mci_slot *slot = host->slot;
+ struct mmc_card *card = slot->mmc->card;
unsigned int i, sg_len;
+ unsigned int align_mask = ((host->data_shift == 3) ? 8 : 4) - 1;
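+ /*
+ * DMA needs blksz and every sg entry aligned to the FIFO word
+ * size: 8 bytes when data_shift == 3 (64-bit data bus), else 4.
+ */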
if (data->host_cookie == COOKIE_PRE_MAPPED)
return data->sg_len;
if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
return -EINVAL;
- if (data->blksz & 3)
+ if (data->blksz & align_mask)
return -EINVAL;
for_each_sg(data->sg, sg, data->sg_len, i) {
- if (sg->offset & 3 || sg->length & 3)
+ if (sg->offset & align_mask || sg->length & align_mask)
return -EINVAL;
}
- sg_len = dma_map_sg(host->dev,
- data->sg,
- data->sg_len,
- mmc_get_dma_dir(data));
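+ /*
+ * Retune the FIFO watermarks per transfer for SDIO: blocks of at
+ * least four FIFO words use a burst of 4 (msize 1, RX mark 3, TX
+ * mark 4); smaller blocks fall back to single transfers with the
+ * TX mark at half the FIFO depth.
+ */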
+ if (card && mmc_card_sdio(card)) {
+ unsigned int rxwmark_val = 0, txwmark_val = 0, msize_val = 0;
+
+ if (data->blksz >= (4 * (1 << host->data_shift))) {
+ msize_val = 1;
+ rxwmark_val = 3;
+ txwmark_val = 4;
+ } else {
+ msize_val = 0;
+ rxwmark_val = 1;
+ txwmark_val = host->fifo_depth / 2;
+ }
+
+ host->fifoth_val = ((msize_val << 28) | (rxwmark_val << 16) | (txwmark_val << 0));
+ dev_dbg(host->dev,
+ "blksz: %d blocks: %d transfer size: %d msize: %d rxwmark: %d fifoth: 0x%08x\n",
+ data->blksz, data->blocks, (data->blksz * data->blocks),
+ msize_val, rxwmark_val, host->fifoth_val);
+
+ mci_writel(host, FIFOTH, host->fifoth_val);
+
+ if (mmc_card_uhs(card)
+ && card->host->caps & MMC_CAP_UHS_SDR104 && data->flags & MMC_DATA_READ)
+ mci_writel(host, CDTHRCTL, data->blksz << 16 | 1);
+ }
+
+ sg_len = dma_map_sg(host->dev, data->sg, data->sg_len, mmc_get_dma_dir(data));
if (sg_len == 0)
return -EINVAL;
return sg_len;
}
-static void dw_mci_pre_req(struct mmc_host *mmc,
- struct mmc_request *mrq)
+static void dw_mci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
/* This data might be unmapped at this time */
data->host_cookie = COOKIE_UNMAPPED;
- if (dw_mci_pre_dma_transfer(slot->host, mrq->data,
- COOKIE_PRE_MAPPED) < 0)
+ if (dw_mci_pre_dma_transfer(slot->host, mrq->data, COOKIE_PRE_MAPPED) < 0)
data->host_cookie = COOKIE_UNMAPPED;
}
-static void dw_mci_post_req(struct mmc_host *mmc,
- struct mmc_request *mrq,
- int err)
+static void dw_mci_post_req(struct mmc_host *mmc, struct mmc_request *mrq, int err)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
return;
if (data->host_cookie != COOKIE_UNMAPPED)
- dma_unmap_sg(slot->host->dev,
- data->sg,
- data->sg_len,
- mmc_get_dma_dir(data));
+ dma_unmap_sg(slot->host->dev, data->sg, data->sg_len, mmc_get_dma_dir(data));
data->host_cookie = COOKIE_UNMAPPED;
}
static int dw_mci_get_cd(struct mmc_host *mmc)
{
int present;
+ int temp;
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
int gpio_cd = mmc_gpio_get_cd(mmc);
+ const struct dw_mci_drv_data *drv_data = host->drv_data;
/* Use platform get_cd function, else try onboard card detect */
if (((mmc->caps & MMC_CAP_NEEDS_POLL)
- || !mmc_card_is_removable(mmc))) {
+ || !mmc_card_is_removable(mmc))) {
present = 1;
if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
if (mmc->caps & MMC_CAP_NEEDS_POLL) {
- dev_info(&mmc->class_dev,
- "card is polling.\n");
+ dev_info(&mmc->class_dev, "card is polling.\n");
} else {
- dev_info(&mmc->class_dev,
- "card is non-removable.\n");
+ dev_info(&mmc->class_dev, "card is non-removable.\n");
}
set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
}
present = gpio_cd;
else
present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
- == 0 ? 1 : 0;
+ == 0 ? 1 : 0;
+ if (drv_data && drv_data->misc_control) {
+ temp = drv_data->misc_control(host, CTRL_CHECK_CD, NULL);
+ if (temp != -1)
+ present = temp;
+ }
spin_lock_bh(&host->lock);
if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
dev_dbg(&mmc->class_dev, "card is present\n");
- else if (!present &&
- !test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
+ else if (!present && !test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
dev_dbg(&mmc->class_dev, "card is not present\n");
spin_unlock_bh(&host->lock);
return present;
}
+#if 0
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
unsigned int blksz = data->blksz;
- const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
+ const u32 mszs[] = { 1, 4, 8, 16, 32, 64, 128, 256 };
u32 fifo_width = 1 << host->data_shift;
u32 blksz_depth = blksz / fifo_width, fifoth_val;
u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
goto done;
do {
- if (!((blksz_depth % mszs[idx]) ||
- (tx_wmark_invers % mszs[idx]))) {
+ if (!((blksz_depth % mszs[idx]) || (tx_wmark_invers % mszs[idx]))) {
msize = idx;
rx_wmark = mszs[idx] - 1;
break;
* If idx is '0', it won't be tried
* Thus, initial values are used
*/
-done:
+ done:
fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
mci_writel(host, FIFOTH, fifoth_val);
}
+#endif
static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
{
* in the FIFO region, so we really shouldn't access it).
*/
if (host->verid < DW_MMC_240A ||
- (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
+ (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
return;
/*
* Card write Threshold is introduced since 2.80a
* It's used when HS400 mode is enabled.
*/
- if (data->flags & MMC_DATA_WRITE &&
- !(host->timing != MMC_TIMING_MMC_HS400))
+ if (data->flags & MMC_DATA_WRITE && host->timing == MMC_TIMING_MMC_HS400)
return;
if (data->flags & MMC_DATA_WRITE)
enable = SDMMC_CARD_RD_THR_EN;
if (host->timing != MMC_TIMING_MMC_HS200 &&
- host->timing != MMC_TIMING_UHS_SDR104)
+ host->timing != MMC_TIMING_MMC_HS400 && host->timing != MMC_TIMING_UHS_SDR104)
goto disable;
blksz_depth = blksz / (1 << host->data_shift);
mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
return;
-disable:
+ disable:
mci_writel(host, CDTHRCTL, 0);
}
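+/*
+ * Build a TMOUT value for the host (data-starvation) timeout. With no
+ * pdata->hto_timeout configured the hardware maximum is used; otherwise
+ * the millisecond timeout is converted to cclk_in ticks (halved for
+ * HS400/ES and 8-bit DDR52, which clock the divider at twice the card
+ * rate), clamped to the 24-bit counter, and any excess is folded into
+ * the 3-bit extended-counter field.
+ */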
+inline u32 dw_mci_calc_hto_timeout(struct dw_mci *host)
+{
+ struct dw_mci_slot *slot = host->slot;
+ u32 target_timeout, count;
+ u32 max_time, max_ext_time;
+ u32 host_clock = host->cclk_in;
+ u32 tmout_value;
+ int ext_cnt = 0;
+
+ if (!host->pdata->hto_timeout)
+ return 0xFFFFFFFF; /* timeout maximum */
+
+ target_timeout = host->pdata->data_timeout;
+
+ if (host->timing == MMC_TIMING_MMC_HS400 || host->timing == MMC_TIMING_MMC_HS400_ES) {
+ if (host->pdata->quirks & DW_MCI_QUIRK_ENABLE_ULP)
+ host_clock *= 2;
+ }
+
+ max_time = SDMMC_DATA_TMOUT_MAX_CNT * SDMMC_DATA_TMOUT_CRT / (host_clock / 1000);
+
+ if (target_timeout < max_time) {
+ tmout_value = mci_readl(host, TMOUT);
+ goto pass;
+ } else {
+ max_ext_time = SDMMC_DATA_TMOUT_MAX_EXT_CNT / (host_clock / 1000);
+ ext_cnt = target_timeout / max_ext_time;
+ }
+
+ target_timeout = host->pdata->hto_timeout;
+
+ /* use clkout for the Synopsys divider */
+ if (host->timing == MMC_TIMING_MMC_HS400 ||
+ host->timing == MMC_TIMING_MMC_HS400_ES ||
+ (host->timing == MMC_TIMING_MMC_DDR52 && slot->ctype == SDMMC_CTYPE_8BIT))
+ host_clock /= 2;
+
+ /* Calculating Timeout value */
+ count = target_timeout * (host_clock / 1000);
+
+ if (count > 0xFFFFFF)
+ count = 0xFFFFFF;
+
+ tmout_value = (count << SDMMC_HTO_TMOUT_SHIFT) | SDMMC_RESP_TMOUT;
+ tmout_value &= ~(0x7 << SDMMC_DATA_TMOUT_EXT_SHIFT);
+ tmout_value |= ((ext_cnt + 1) << SDMMC_DATA_TMOUT_EXT_SHIFT);
+ pass:
+ /* Set return value */
+ return tmout_value;
+}
+
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
unsigned long irqflags;
if (!host->use_dma)
return -ENODEV;
+ if (host->use_dma && host->dma_ops->init && host->dma_ops->reset) {
+ host->dma_ops->init(host);
+ host->dma_ops->reset(host);
+ }
+
sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED);
if (sg_len < 0) {
host->dma_ops->stop(host);
+ dw_mci_set_timeout(host, dw_mci_calc_hto_timeout(host));
return sg_len;
}
if (host->use_dma == TRANS_MODE_IDMAC)
dev_vdbg(host->dev,
"sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
- (unsigned long)host->sg_cpu,
- (unsigned long)host->sg_dma,
- sg_len);
-
- /*
- * Decide the MSIZE and RX/TX Watermark.
- * If current block size is same with previous size,
- * no need to update fifoth.
- */
- if (host->prev_blksz != data->blksz)
- dw_mci_adjust_fifoth(host, data);
+ (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma, sg_len);
/* Enable the DMA interface */
temp = mci_readl(host, CTRL);
/* Disable RX/TX IRQs, let DMA handle it */
spin_lock_irqsave(&host->irq_lock, irqflags);
+ mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
temp = mci_readl(host, INTMASK);
- temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
+ temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
mci_writel(host, INTMASK, temp);
spin_unlock_irqrestore(&host->irq_lock, irqflags);
if (host->dma_ops->start(host, sg_len)) {
host->dma_ops->stop(host);
/* We can't do DMA, try PIO for this one */
- dev_dbg(host->dev,
- "%s: fall back to PIO mode for current transfer\n",
- __func__);
+ dev_dbg(host->dev, "%s: fall back to PIO mode for current transfer\n", __func__);
return -ENODEV;
}
dw_mci_ctrl_thld(host, data);
if (dw_mci_submit_data_dma(host, data)) {
+ if (SDMMC_GET_FCNT(mci_readl(host, STATUS)))
+ dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
+
if (host->data->flags & MMC_DATA_READ)
flags |= SG_MITER_TO_SG;
else
* If next issued data may be transferred by DMA mode,
* prev_blksz should be invalidated.
*/
- if (host->wm_aligned)
- dw_mci_adjust_fifoth(host, data);
- else
- mci_writel(host, FIFOTH, host->fifoth_val);
+ mci_writel(host, FIFOTH, host->fifoth_val);
host->prev_blksz = 0;
} else {
/*
{
struct dw_mci *host = slot->host;
unsigned int clock = slot->clock;
+ const struct dw_mci_drv_data *drv_data = host->drv_data;
u32 div;
u32 clk_en_a;
u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;
- /* We must continue to set bit 28 in CMD until the change is complete */
- if (host->state == STATE_WAITING_CMD11_DONE)
- sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;
-
if (!clock) {
mci_writel(host, CLKENA, 0);
mci_send_cmd(slot, sdmmc_cmd_bits, 0);
} else if (clock != host->current_speed || force_clkinit) {
div = host->bus_hz / clock;
if (host->bus_hz % clock && host->bus_hz > clock)
/*
div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
if ((clock != slot->__clk_old &&
- !test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
- force_clkinit) {
+ !test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) || force_clkinit) {
/* Silent the verbose log if calling from PM context */
if (!force_clkinit)
dev_info(&slot->mmc->class_dev,
"Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
slot->id, host->bus_hz, clock,
- div ? ((host->bus_hz / div) >> 1) :
- host->bus_hz, div);
+ div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div);
/*
* If card is polling, display the message only
* one time at boot time.
*/
- if (slot->mmc->caps & MMC_CAP_NEEDS_POLL &&
- slot->mmc->f_min == clock)
+ if (slot->mmc->caps & MMC_CAP_NEEDS_POLL && slot->mmc->f_min == clock)
set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags);
}
mci_writel(host, CLKSRC, 0);
/* inform CIU */
- mci_send_cmd(slot, sdmmc_cmd_bits, 0);
+ dw_mci_update_clock(slot);
/* set clock to desired speed */
mci_writel(host, CLKDIV, div);
/* inform CIU */
- mci_send_cmd(slot, sdmmc_cmd_bits, 0);
+ dw_mci_update_clock(slot);
/* enable clock; only low power if no SDIO */
clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
+ if (host->current_speed <= 400 * 1000)
+ clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
mci_writel(host, CLKENA, clk_en_a);
/* inform CIU */
- mci_send_cmd(slot, sdmmc_cmd_bits, 0);
+ dw_mci_update_clock(slot);
/* keep the last clock value that was requested from core */
slot->__clk_old = clock;
/* Set the current slot bus width */
mci_writel(host, CTYPE, (slot->ctype << slot->id));
+
+ /* Hwacg control for init */
+ if (host->pdata->quirks & DW_MCI_QUIRK_HWACG_CTRL) {
+ if (drv_data && drv_data->hwacg_control) {
+ if (host->current_speed > 400 * 1000)
+ drv_data->hwacg_control(host, HWACG_Q_ACTIVE_EN);
+ else
+ drv_data->hwacg_control(host, HWACG_Q_ACTIVE_DIS);
+ }
+ }
+}
+
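+/*
+ * Convert pdata->data_timeout (ms) into a TMOUT register value: the
+ * counter ticks at cclk_in scaled by SDMMC_DATA_TMOUT_CRT, and whatever
+ * exceeds the base counter range is carried by the extended-counter
+ * field. A data_timeout of 0 selects the maximum (0xFFFFFFFF).
+ */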
+inline u32 dw_mci_calc_timeout(struct dw_mci *host)
+{
+ u32 target_timeout;
+ u32 count;
+ u32 max_time;
+ u32 max_ext_time;
+ int ext_cnt = 0;
+ u32 host_clock = host->cclk_in;
+
+ if (!host->pdata->data_timeout)
+ return 0xFFFFFFFF; /* timeout maximum */
+
+ target_timeout = host->pdata->data_timeout;
+
+ if (host->timing == MMC_TIMING_MMC_HS400 || host->timing == MMC_TIMING_MMC_HS400_ES) {
+ if (host->pdata->quirks & DW_MCI_QUIRK_ENABLE_ULP)
+ host_clock *= 2;
+ }
+
+ max_time = SDMMC_DATA_TMOUT_MAX_CNT * SDMMC_DATA_TMOUT_CRT / (host_clock / 1000);
+
+ if (target_timeout > max_time) {
+ max_ext_time = SDMMC_DATA_TMOUT_MAX_EXT_CNT / (host_clock / 1000);
+ ext_cnt = target_timeout / max_ext_time;
+ target_timeout -= (max_ext_time * ext_cnt);
+ }
+ count = (target_timeout * (host_clock / 1000)) / SDMMC_DATA_TMOUT_CRT;
+
+ /* Set return value */
+ return ((count << SDMMC_DATA_TMOUT_SHIFT)
+ | ((ext_cnt + SDMMC_DATA_TMOUT_EXT) << SDMMC_DATA_TMOUT_EXT_SHIFT)
+ | SDMMC_RESP_TMOUT);
}
static void __dw_mci_start_request(struct dw_mci *host,
- struct dw_mci_slot *slot,
- struct mmc_command *cmd)
+ struct dw_mci_slot *slot, struct mmc_command *cmd)
{
struct mmc_request *mrq;
- struct mmc_data *data;
+ struct mmc_data *data;
u32 cmdflags;
mrq = slot->mrq;
+ if (mrq->cmd->opcode == MMC_SEND_TUNING_BLOCK ||
+ mrq->cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
+ mod_timer(&host->timer, jiffies + msecs_to_jiffies(500));
+ else if (host->pdata->sw_timeout)
+ mod_timer(&host->timer, jiffies + msecs_to_jiffies(host->pdata->sw_timeout));
+ else
+ mod_timer(&host->timer, jiffies + msecs_to_jiffies(10000));
+
+ host->slot = slot;
host->mrq = mrq;
host->pending_events = 0;
data = cmd->data;
if (data) {
- mci_writel(host, TMOUT, 0xFFFFFFFF);
- mci_writel(host, BYTCNT, data->blksz*data->blocks);
+ dw_mci_set_timeout(host, dw_mci_calc_timeout(host));
+ mci_writel(host, BYTCNT, data->blksz * data->blocks);
mci_writel(host, BLKSIZ, data->blksz);
+ host->transferred_cnt += data->blksz * data->blocks;
}
cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
if (data) {
dw_mci_submit_data(host, data);
- wmb(); /* drain writebuffer */
+ wmb(); /* drain writebuffer */
}
-
+ dw_mci_debug_req_log(host, mrq, STATE_REQ_START, 0);
dw_mci_start_command(host, cmd, cmdflags);
if (cmd->opcode == SD_SWITCH_VOLTAGE) {
*/
spin_lock_irqsave(&host->irq_lock, irqflags);
if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
- mod_timer(&host->cmd11_timer,
- jiffies + msecs_to_jiffies(500) + 1);
+ mod_timer(&host->cmd11_timer, jiffies + msecs_to_jiffies(500) + 1);
spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
- host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
+ if (mrq->stop)
+ host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
+ else
+ host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
}
-static void dw_mci_start_request(struct dw_mci *host,
- struct dw_mci_slot *slot)
+static void dw_mci_start_request(struct dw_mci *host, struct dw_mci_slot *slot)
{
struct mmc_request *mrq = slot->mrq;
struct mmc_command *cmd;
+ host->req_state = DW_MMC_REQ_BUSY;
+
cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
__dw_mci_start_request(host, slot, cmd);
}
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
struct mmc_request *mrq)
{
- dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
- host->state);
+ dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n", host->state);
slot->mrq = mrq;
if (host->state == STATE_WAITING_CMD11_DONE) {
- dev_warn(&slot->mmc->class_dev,
- "Voltage change didn't complete\n");
+ dev_warn(&slot->mmc->class_dev, "Voltage change didn't complete\n");
/*
* this case isn't expected to happen, so we can
* either crash here or just try to continue on
* request wouldn't fail until another card was inserted.
*/
+ if (!dw_mci_stop_abort_cmd(mrq->cmd)) {
+ if (!dw_mci_wait_data_busy(host, mrq)) {
+ mrq->cmd->error = -ENOTRECOVERABLE;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+ }
+
if (!dw_mci_get_cd(mmc)) {
mrq->cmd->error = -ENOMEDIUM;
mmc_request_done(mmc, mrq);
return;
}
+ if (host->qos_cntrl)
+ dw_mci_qos_get(host);
+
spin_lock_bh(&host->lock);
+ /* IDLE IP for SICD */
+#ifdef CONFIG_CPU_IDLE
+ exynos_update_ip_idle_status(slot->host->idle_ip_index, 0);
+#endif
+
dw_mci_queue_request(host, slot, mrq);
spin_unlock_bh(&host->lock);
{
struct dw_mci_slot *slot = mmc_priv(mmc);
const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
+ struct dw_mci *host = slot->host;
u32 regs;
int ret;
/* DDR mode set */
if (ios->timing == MMC_TIMING_MMC_DDR52 ||
ios->timing == MMC_TIMING_UHS_DDR50 ||
- ios->timing == MMC_TIMING_MMC_HS400 ||
- ios->timing == MMC_TIMING_MMC_HS400_ES)
+ ios->timing == MMC_TIMING_MMC_HS400 || ios->timing == MMC_TIMING_MMC_HS400_ES)
regs |= ((0x1 << slot->id) << 16);
else
regs &= ~((0x1 << slot->id) << 16);
+ if (slot->host->pdata->caps &
+ (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))
+ regs |= (0x1 << slot->id);
+
mci_writel(slot->host, UHS_REG, regs);
slot->host->timing = ios->timing;
switch (ios->power_mode) {
case MMC_POWER_UP:
- if (!IS_ERR(mmc->supply.vmmc)) {
- ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
- ios->vdd);
- if (ret) {
- dev_err(slot->host->dev,
- "failed to enable vmmc regulator\n");
- /*return, if failed turn on vmmc*/
- return;
+ if (!(slot->host->quirks & DW_MMC_QUIRK_FIXED_VOLTAGE)) {
+ if (!IS_ERR(mmc->supply.vmmc)) {
+ ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+ if (ret) {
+ dev_err(slot->host->dev,
+ "failed to enable vmmc regulator\n");
+ /* return if vmmc could not be turned on */
+ return;
+ }
}
}
set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
mci_writel(slot->host, PWREN, regs);
break;
case MMC_POWER_ON:
- if (!slot->host->vqmmc_enabled) {
- if (!IS_ERR(mmc->supply.vqmmc)) {
- ret = regulator_enable(mmc->supply.vqmmc);
- if (ret < 0)
- dev_err(slot->host->dev,
- "failed to enable vqmmc\n");
- else
+ if (!(slot->host->quirks & DW_MMC_QUIRK_FIXED_VOLTAGE)) {
+ if (!slot->host->vqmmc_enabled) {
+ if (!IS_ERR(mmc->supply.vqmmc)) {
+ ret = regulator_enable(mmc->supply.vqmmc);
+ if (ret < 0)
+ dev_err(slot->host->dev,
+ "failed to enable vqmmc\n");
+ else
+ slot->host->vqmmc_enabled = true;
+ } else {
+ /* Keep track so we don't reset again */
slot->host->vqmmc_enabled = true;
-
- } else {
- /* Keep track so we don't reset again */
- slot->host->vqmmc_enabled = true;
+ }
+ /* Reset our state machine after powering on */
+ dw_mci_ctrl_reset(slot->host, SDMMC_CTRL_ALL_RESET_FLAGS);
}
-
- /* Reset our state machine after powering on */
- dw_mci_ctrl_reset(slot->host,
- SDMMC_CTRL_ALL_RESET_FLAGS);
}
-
/* Adjust clock / bus width after power is up */
dw_mci_setup_bus(slot, false);
break;
case MMC_POWER_OFF:
- /* Turn clock off before power goes down */
- dw_mci_setup_bus(slot, false);
-
- if (!IS_ERR(mmc->supply.vmmc))
- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
-
- if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
- regulator_disable(mmc->supply.vqmmc);
- slot->host->vqmmc_enabled = false;
-
- regs = mci_readl(slot->host, PWREN);
- regs &= ~(1 << slot->id);
- mci_writel(slot->host, PWREN, regs);
+ if (!(slot->host->quirks & DW_MMC_QUIRK_FIXED_VOLTAGE)) {
+ /* Turn clock off before power goes down */
+ dw_mci_setup_bus(slot, false);
+
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+ if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
+ regulator_disable(mmc->supply.vqmmc);
+ slot->host->vqmmc_enabled = false;
+
+ regs = mci_readl(slot->host, PWREN);
+ regs &= ~(1 << slot->id);
+ mci_writel(slot->host, PWREN, regs);
+
+ if (host->quirks & DW_MCI_QUIRK_HWACG_CTRL) {
+ if (drv_data && drv_data->hwacg_control)
+ drv_data->hwacg_control(host, HWACG_Q_ACTIVE_EN);
+ }
+ }
break;
default:
break;
const struct dw_mci_drv_data *drv_data = host->drv_data;
u32 uhs;
u32 v18 = SDMMC_UHS_18V << slot->id;
- int ret;
+ int min_uv, max_uv;
+ int ret = 0, retry = 10;
+ u32 status;
if (drv_data && drv_data->switch_voltage)
return drv_data->switch_voltage(mmc, ios);
* does no harm but you need to set the regulator directly. Try both.
*/
uhs = mci_readl(host, UHS_REG);
- if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
+ if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
+ min_uv = 2800000;
+ max_uv = 2800000;
uhs &= ~v18;
- else
+ } else {
+ min_uv = 1800000;
+ max_uv = 1800000;
uhs |= v18;
+ }
- if (!IS_ERR(mmc->supply.vqmmc)) {
- ret = mmc_regulator_set_vqmmc(mmc, ios);
+ if (!(host->quirks & DW_MMC_QUIRK_FIXED_VOLTAGE)) {
+ if (!IS_ERR(mmc->supply.vqmmc)) {
+ if (ios->signal_voltage != MMC_SIGNAL_VOLTAGE_330) {
+ dw_mci_ctrl_reset(host, SDMMC_CTRL_RESET);
+ /* check for DAT line busy */
+ do {
+ if (!readl_poll_timeout(host->regs + SDMMC_STATUS, status,
+ !(status & SDMMC_STATUS_BUSY),
+ 1, 10 * USEC_PER_MSEC)) {
+ goto out;
+ }
+
+ dw_mci_ctrl_reset(host, SDMMC_CTRL_RESET);
+ } while (--retry);
+ }
+ out:
+ /* wait for the lines to settle */
+ mdelay(10);
+ ret = mmc_regulator_set_vqmmc(mmc, ios);
- if (ret) {
- dev_dbg(&mmc->class_dev,
- "Regulator set error %d - %s V\n",
- ret, uhs & v18 ? "1.8" : "3.3");
- return ret;
+ if (ret) {
+ dev_err(&mmc->class_dev,
+ "Regulator set error %d - %s V\n",
+ ret, uhs & v18 ? "1.8" : "3.3");
+ return ret;
+ }
}
}
mci_writel(host, UHS_REG, uhs);
+ del_timer(&host->cmd11_timer);
return 0;
}
int gpio_ro = mmc_gpio_get_ro(mmc);
/* Use platform get_ro function, else try on board write protect */
- if (gpio_ro >= 0)
+ if ((slot->mmc->caps2 & MMC_CAP2_NO_WRITE_PROTECT) ||
+ (slot->host->quirks & DW_MCI_QUIRK_NO_WRITE_PROTECT))
+ read_only = 0;
+ else if (gpio_ro >= 0)
read_only = gpio_ro;
else
- read_only =
- mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
+ read_only = mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
- dev_dbg(&mmc->class_dev, "card is %s\n",
- read_only ? "read-only" : "read-write");
+ dev_dbg(&mmc->class_dev, "card is %s\n", read_only ? "read-only" : "read-write");
return read_only;
}
if (host->use_dma == TRANS_MODE_IDMAC)
dw_mci_idmac_reset(host);
- if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
- SDMMC_CTRL_FIFO_RESET))
+ if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET | SDMMC_CTRL_FIFO_RESET))
return;
/*
clk_en_a_old = mci_readl(host, CLKENA);
- if (card->type == MMC_TYPE_SDIO ||
- card->type == MMC_TYPE_SD_COMBO) {
+ if (card->type == MMC_TYPE_SDIO || card->type == MMC_TYPE_SD_COMBO) {
set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
clk_en_a = clk_en_a_old & ~clken_low_pwr;
} else {
if (clk_en_a != clk_en_a_old) {
mci_writel(host, CLKENA, clk_en_a);
- mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
- SDMMC_CMD_PRV_DAT_WAIT, 0);
+ mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
}
}
}
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
const struct dw_mci_drv_data *drv_data = host->drv_data;
- int err = -EINVAL;
+ struct dw_mci_tuning_data tuning_data;
+ int err = -ENOSYS;
+
+ if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
+ if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
+ tuning_data.blk_pattern = tuning_blk_pattern_8bit;
+ tuning_data.blksz = sizeof(tuning_blk_pattern_8bit);
+ } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
+ tuning_data.blk_pattern = tuning_blk_pattern_4bit;
+ tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
+ } else {
+ return -EINVAL;
+ }
+ } else if (opcode == MMC_SEND_TUNING_BLOCK) {
+ tuning_data.blk_pattern = tuning_blk_pattern_4bit;
+ tuning_data.blksz = sizeof(tuning_blk_pattern_4bit);
+ } else {
+ dev_err(host->dev, "Undefined command(%d) for tuning\n", opcode);
+ return -EINVAL;
+ }
if (drv_data && drv_data->execute_tuning)
- err = drv_data->execute_tuning(slot, opcode);
+ err = drv_data->execute_tuning(slot, opcode, &tuning_data);
return err;
}
-static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
- struct mmc_ios *ios)
+static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
{
u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
bool ret = false;
- u32 status = 0;
/*
* Resetting generates a block interrupt, hence setting
*/
mci_writel(host, RINTSTS, 0xFFFFFFFF);
- if (!host->use_dma) {
- ret = true;
- goto ciu_out;
- }
+ /* if using dma we wait for dma_req to clear */
+ if (host->use_dma) {
+ u32 status;
+
+ if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
+ status,
+ !(status & SDMMC_STATUS_DMA_REQ),
+ 1, 500 * USEC_PER_MSEC)) {
+ dev_err(host->dev,
+ "%s: Timeout waiting for dma_req to clear during reset\n",
+ __func__);
+ goto ciu_out;
+ }
- /* Wait for dma_req to be cleared */
- if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
- status,
- !(status & SDMMC_STATUS_DMA_REQ),
- 1, 500 * USEC_PER_MSEC)) {
- dev_err(host->dev,
- "%s: Timeout waiting for dma_req to be cleared\n",
- __func__);
- goto ciu_out;
+ /* when using DMA next we reset the fifo again */
+ if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
+ goto ciu_out;
}
-
- /* when using DMA next we reset the fifo again */
- if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
- goto ciu_out;
} else {
/* if the controller reset bit did clear, then set clock regs */
if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
ret = true;
-ciu_out:
+ ciu_out:
/* After a CTRL reset we need to have CIU set clock registers */
mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK, 0);
}
static const struct mmc_host_ops dw_mci_ops = {
- .request = dw_mci_request,
- .pre_req = dw_mci_pre_req,
- .post_req = dw_mci_post_req,
- .set_ios = dw_mci_set_ios,
- .get_ro = dw_mci_get_ro,
- .get_cd = dw_mci_get_cd,
- .hw_reset = dw_mci_hw_reset,
- .enable_sdio_irq = dw_mci_enable_sdio_irq,
- .ack_sdio_irq = dw_mci_ack_sdio_irq,
- .execute_tuning = dw_mci_execute_tuning,
- .card_busy = dw_mci_card_busy,
+ .request = dw_mci_request,
+ .pre_req = dw_mci_pre_req,
+ .post_req = dw_mci_post_req,
+ .set_ios = dw_mci_set_ios,
+ .get_ro = dw_mci_get_ro,
+ .get_cd = dw_mci_get_cd,
+ .hw_reset = dw_mci_hw_reset,
+ .enable_sdio_irq = dw_mci_enable_sdio_irq,
+ .ack_sdio_irq = dw_mci_ack_sdio_irq,
+ .execute_tuning = dw_mci_execute_tuning,
+ .card_busy = dw_mci_card_busy,
.start_signal_voltage_switch = dw_mci_switch_voltage,
- .init_card = dw_mci_init_card,
- .prepare_hs400_tuning = dw_mci_prepare_hs400_tuning,
+ .init_card = dw_mci_init_card,
+ .prepare_hs400_tuning = dw_mci_prepare_hs400_tuning,
};
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
- __releases(&host->lock)
- __acquires(&host->lock)
+__releases(&host->lock) __acquires(&host->lock)
{
struct dw_mci_slot *slot;
- struct mmc_host *prev_mmc = host->slot->mmc;
+ struct mmc_host *prev_mmc = host->slot->mmc;
WARN_ON(host->cmd || host->data);
+ del_timer(&host->timer);
+ host->req_state = DW_MMC_REQ_IDLE;
+ dw_mci_debug_req_log(host, mrq, STATE_REQ_END, 0);
host->slot->mrq = NULL;
host->mrq = NULL;
if (!list_empty(&host->queue)) {
- slot = list_entry(host->queue.next,
- struct dw_mci_slot, queue_node);
+ slot = list_entry(host->queue.next, struct dw_mci_slot, queue_node);
list_del(&slot->queue_node);
- dev_vdbg(host->dev, "list not empty: %s is next\n",
- mmc_hostname(slot->mmc));
+ dev_vdbg(host->dev, "list not empty: %s is next\n", mmc_hostname(slot->mmc));
host->state = STATE_SENDING_CMD;
dw_mci_start_request(host, slot);
} else {
}
spin_unlock(&host->lock);
+ if (host->qos_cntrl)
+ dw_mci_qos_put(host);
mmc_request_done(prev_mmc, mrq);
spin_lock(&host->lock);
+
+#ifdef CONFIG_CPU_IDLE
+ exynos_update_ip_idle_status(host->idle_ip_index, 1);
+#endif
}
static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
} else if (status & SDMMC_INT_DCRC) {
data->error = -EILSEQ;
} else if (status & SDMMC_INT_EBE) {
- if (host->dir_status ==
- DW_MCI_SEND_STATUS) {
+ if (host->dir_status == DW_MCI_SEND_STATUS) {
/*
* No data CRC status was returned.
* The number of bytes transferred
*/
data->bytes_xfered = 0;
data->error = -ETIMEDOUT;
- } else if (host->dir_status ==
- DW_MCI_RECV_STATUS) {
+ } else if (host->dir_status == DW_MCI_RECV_STATUS) {
data->error = -EILSEQ;
}
} else {
data->error = -EILSEQ;
}
- dev_dbg(host->dev, "data error, status 0x%08x\n", status);
+ dev_err(host->dev, "data error, status 0x%08x %d\n", status, host->dir_status);
/*
* After an error, there may be data lingering
* in the FIFO
*/
- dw_mci_reset(host);
+ sg_miter_stop(&host->sg_miter);
+ host->sg = NULL;
+ dw_mci_fifo_reset(host->dev, host);
+ dw_mci_ciu_reset(host->dev, host);
} else {
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
if (drto_div == 0)
drto_div = 1;
- drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div,
- host->bus_hz);
+ drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div, host->bus_hz);
/* add a bit spare time */
drto_ms += 10;
static void dw_mci_tasklet_func(unsigned long priv)
{
struct dw_mci *host = (struct dw_mci *)priv;
- struct mmc_data *data;
+ struct mmc_data *data;
struct mmc_command *cmd;
struct mmc_request *mrq;
enum dw_mci_state state;
spin_lock(&host->lock);
+ if (host->sw_timeout_chk)
+ goto unlock;
+
state = host->state;
data = host->data;
mrq = host->mrq;
err = dw_mci_command_complete(host, cmd);
if (cmd == mrq->sbc && !err) {
prev_state = state = STATE_SENDING_CMD;
- __dw_mci_start_request(host, host->slot,
- mrq->cmd);
+ __dw_mci_start_request(host, host->slot, mrq->cmd);
goto unlock;
}
* delayed. Allowing the transfer to take place
* avoids races and keeps things simple.
*/
- if ((err != -ETIMEDOUT) &&
- (cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
+ if ((err != -ETIMEDOUT) && (cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
state = STATE_SENDING_DATA;
continue;
}
+ dw_mci_fifo_reset(host->dev, host);
dw_mci_stop_dma(host);
send_stop_abort(host, data);
state = STATE_SENDING_STOP;
+ dw_mci_debug_req_log(host, host->mrq, STATE_REQ_CMD_PROCESS, state);
break;
}
if (!cmd->data || err) {
- dw_mci_request_end(host, mrq);
+ if (!host->sw_timeout_chk)
+ dw_mci_request_end(host, mrq);
goto unlock;
}
prev_state = state = STATE_SENDING_DATA;
+ dw_mci_debug_req_log(host, host->mrq, STATE_REQ_CMD_PROCESS, state);
+
/* fall through */
case STATE_SENDING_DATA:
* transfer complete; stopping the DMA and sending an
* abort won't hurt.
*/
- if (test_and_clear_bit(EVENT_DATA_ERROR,
- &host->pending_events)) {
+ if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) {
+ dw_mci_fifo_reset(host->dev, host);
dw_mci_stop_dma(host);
- if (!(host->data_status & (SDMMC_INT_DRTO |
- SDMMC_INT_EBE)))
+ if (!(host->data_status & (SDMMC_INT_DRTO | SDMMC_INT_EBE)))
send_stop_abort(host, data);
state = STATE_DATA_ERROR;
+ dw_mci_debug_req_log(host,
+ host->mrq, STATE_REQ_DATA_PROCESS, state);
break;
}
- if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
- &host->pending_events)) {
+ if (!test_and_clear_bit(EVENT_XFER_COMPLETE, &host->pending_events)) {
/*
* If all data-related interrupts don't come
* within the given time in reading data state.
*
* This has the advantage of sending the stop command.
*/
- if (test_and_clear_bit(EVENT_DATA_ERROR,
- &host->pending_events)) {
+ if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) {
+ dw_mci_fifo_reset(host->dev, host);
dw_mci_stop_dma(host);
- if (!(host->data_status & (SDMMC_INT_DRTO |
- SDMMC_INT_EBE)))
+ if (!(host->data_status & (SDMMC_INT_DRTO | SDMMC_INT_EBE)))
send_stop_abort(host, data);
state = STATE_DATA_ERROR;
+ dw_mci_debug_req_log(host, host->mrq,
+ STATE_REQ_DATA_PROCESS, state);
break;
}
prev_state = state = STATE_DATA_BUSY;
+ dw_mci_debug_req_log(host, host->mrq, STATE_REQ_DATA_PROCESS, state);
+
/* fall through */
case STATE_DATA_BUSY:
- if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
- &host->pending_events)) {
+ if (!test_and_clear_bit(EVENT_DATA_COMPLETE, &host->pending_events)) {
/*
* If data error interrupt comes but data over
* interrupt doesn't come within the given time.
break;
}
- host->data = NULL;
set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
err = dw_mci_data_complete(host, data);
+ host->data = NULL;
+
if (!err) {
if (!data->stop || mrq->sbc) {
if (mrq->sbc && data->stop)
data->stop->error = 0;
- dw_mci_request_end(host, mrq);
+
+ if (!host->sw_timeout_chk)
+ dw_mci_request_end(host, mrq);
goto unlock;
}
- /* stop command for open-ended transfer*/
+ /* stop command for open-ended transfer */
if (data->stop)
send_stop_abort(host, data);
} else {
* through to the SENDING_STOP command and
* everything will be peachy keen.
*/
- if (!test_bit(EVENT_CMD_COMPLETE,
- &host->pending_events)) {
+ if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) {
host->cmd = NULL;
- dw_mci_request_end(host, mrq);
+
+ if (!host->sw_timeout_chk)
+ dw_mci_request_end(host, mrq);
goto unlock;
}
}
* stop-abort command has been already issued.
*/
prev_state = state = STATE_SENDING_STOP;
+ dw_mci_debug_req_log(host, host->mrq, STATE_REQ_DATA_PROCESS, state);
/* fall through */
break;
/* CMD error in data command */
- if (mrq->cmd->error && mrq->data)
- dw_mci_reset(host);
+ if (mrq->cmd->error && mrq->data) {
+ dw_mci_stop_dma(host);
+ sg_miter_stop(&host->sg_miter);
+ host->sg = NULL;
+ dw_mci_fifo_reset(host->dev, host);
+ }
host->cmd = NULL;
host->data = NULL;
else
host->cmd_status = 0;
- dw_mci_request_end(host, mrq);
+ if (!host->sw_timeout_chk)
+ dw_mci_request_end(host, mrq);
goto unlock;
case STATE_DATA_ERROR:
- if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
- &host->pending_events))
+ if (!test_and_clear_bit(EVENT_XFER_COMPLETE, &host->pending_events))
break;
+ set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
+ set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
+ set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
+
state = STATE_DATA_BUSY;
+ dw_mci_debug_req_log(host, host->mrq, STATE_REQ_DATA_PROCESS, state);
+
break;
}
} while (state != prev_state);
host->state = state;
-unlock:
+ unlock:
spin_unlock(&host->lock);
}
{
cnt = min_t(int, cnt, host->part_buf_count);
if (cnt) {
- memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
- cnt);
+ memcpy(buf, (void *)&host->part_buf + host->part_buf_start, cnt);
host->part_buf_count -= cnt;
host->part_buf_start += cnt;
}
/* put anything remaining in the part_buf */
if (cnt) {
dw_mci_set_part_bytes(host, buf, cnt);
- /* Push data if we have reached the expected data length */
- if ((data->bytes_xfered + init_cnt) ==
- (data->blksz * data->blocks))
+ /* Push data if we have reached the expected data length */
+ if ((data->bytes_xfered + init_cnt) == (data->blksz * data->blocks))
mci_fifo_writew(host->fifo_reg, host->part_buf16);
}
}
buf += len;
cnt -= len;
if (host->part_buf_count == 4) {
- mci_fifo_writel(host->fifo_reg, host->part_buf32);
+ mci_fifo_writel(host->fifo_reg, host->part_buf32);
host->part_buf_count = 0;
}
}
cnt -= len;
/* push data from aligned buffer into fifo */
for (i = 0; i < items; ++i)
- mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
+ mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
}
} else
#endif
/* put anything remaining in the part_buf */
if (cnt) {
dw_mci_set_part_bytes(host, buf, cnt);
- /* Push data if we have reached the expected data length */
- if ((data->bytes_xfered + init_cnt) ==
- (data->blksz * data->blocks))
+ /* Push data if we have reached the expected data length */
+ if ((data->bytes_xfered + init_cnt) == (data->blksz * data->blocks))
mci_fifo_writel(host->fifo_reg, host->part_buf32);
}
}
cnt -= len;
if (host->part_buf_count == 8) {
- mci_fifo_writeq(host->fifo_reg, host->part_buf);
+ mci_fifo_writeq(host->fifo_reg, host->part_buf);
host->part_buf_count = 0;
}
}
cnt -= len;
/* push data from aligned buffer into fifo */
for (i = 0; i < items; ++i)
- mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
+ mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
}
} else
#endif
if (cnt) {
dw_mci_set_part_bytes(host, buf, cnt);
/* Push data if we have reached the expected data length */
- if ((data->bytes_xfered + init_cnt) ==
- (data->blksz * data->blocks))
+ if ((data->bytes_xfered + init_cnt) == (data->blksz * data->blocks))
mci_fifo_writeq(host->fifo_reg, host->part_buf);
}
}
struct sg_mapping_iter *sg_miter = &host->sg_miter;
void *buf;
unsigned int offset;
- struct mmc_data *data = host->data;
+ struct mmc_data *data = host->data;
int shift = host->data_shift;
u32 status;
unsigned int len;
do {
fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
- << shift) + host->part_buf_count;
+ << shift) + host->part_buf_count;
len = min(remain, fcnt);
if (!len)
break;
sg_miter->consumed = offset;
status = mci_readl(host, MINTSTS);
mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
- /* if the RXDR is ready read again */
- } while ((status & SDMMC_INT_RXDR) ||
- (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
+ /* if the RXDR is ready read again */
+ } while ((status & SDMMC_INT_RXDR) || (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
if (!remain) {
if (!sg_miter_next(sg_miter))
sg_miter_stop(sg_miter);
return;
-done:
+ done:
sg_miter_stop(sg_miter);
host->sg = NULL;
- smp_wmb(); /* drain writebuffer */
+ smp_wmb(); /* drain writebuffer */
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
struct sg_mapping_iter *sg_miter = &host->sg_miter;
void *buf;
unsigned int offset;
- struct mmc_data *data = host->data;
+ struct mmc_data *data = host->data;
int shift = host->data_shift;
u32 status;
unsigned int len;
offset = 0;
do {
- fcnt = ((fifo_depth -
- SDMMC_GET_FCNT(mci_readl(host, STATUS)))
- << shift) - host->part_buf_count;
+ fcnt = ((fifo_depth - SDMMC_GET_FCNT(mci_readl(host, STATUS)))
+ << shift) - host->part_buf_count;
len = min(remain, fcnt);
if (!len)
break;
sg_miter->consumed = offset;
status = mci_readl(host, MINTSTS);
mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
- } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
+ } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
if (!remain) {
if (!sg_miter_next(sg_miter))
sg_miter_stop(sg_miter);
return;
-done:
+ done:
sg_miter_stop(sg_miter);
host->sg = NULL;
- smp_wmb(); /* drain writebuffer */
+ smp_wmb(); /* drain writebuffer */
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
if (!host->cmd_status)
host->cmd_status = status;
- smp_wmb(); /* drain writebuffer */
+ smp_wmb(); /* drain writebuffer */
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
tasklet_schedule(&host->tasklet);
}
+#if 0
static void dw_mci_handle_cd(struct dw_mci *host)
{
struct dw_mci_slot *slot = host->slot;
if (slot->mmc->ops->card_event)
slot->mmc->ops->card_event(slot->mmc);
- mmc_detect_change(slot->mmc,
- msecs_to_jiffies(host->pdata->detect_delay_ms));
+ mmc_detect_change(slot->mmc, msecs_to_jiffies(host->pdata->detect_delay_ms));
}
+#endif
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
struct dw_mci *host = dev_id;
- u32 pending;
+ u32 status, pending;
struct dw_mci_slot *slot = host->slot;
unsigned long irqflags;
- pending = mci_readl(host, MINTSTS); /* read-only mask reg */
+ status = mci_readl(host, RINTSTS);
+ pending = mci_readl(host, MINTSTS); /* read-only mask reg */
if (pending) {
+ if (pending & SDMMC_INT_HLE) {
+ dev_err(host->dev, "hardware locked write error\n");
+ dw_mci_reg_dump(host);
+ mci_writel(host, RINTSTS, SDMMC_INT_HLE);
+ dw_mci_debug_cmd_log(host->cmd, host, false, DW_MCI_FLAG_ERROR, status);
+ host->cmd_status = pending;
+ tasklet_schedule(&host->tasklet);
+ }
+
/* Check volt switch first, since it can look like an error */
- if ((host->state == STATE_SENDING_CMD11) &&
- (pending & SDMMC_INT_VOLT_SWITCH)) {
+ if ((host->state == STATE_SENDING_CMD11) && (pending & SDMMC_INT_VOLT_SWITCH)) {
mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
pending &= ~SDMMC_INT_VOLT_SWITCH;
del_timer(&host->cto_timer);
mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
host->cmd_status = pending;
- smp_wmb(); /* drain writebuffer */
+ dw_mci_debug_cmd_log(host->cmd, host, false, DW_MCI_FLAG_ERROR, status);
+
+ smp_wmb(); /* drain writebuffer */
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
if (pending & DW_MCI_DATA_ERROR_FLAGS) {
+ if (mci_readl(host, RINTSTS) & SDMMC_INT_HTO)
+ dw_mci_reg_dump(host);
+
/* if there is an error report DATA_ERROR */
mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
+ dw_mci_debug_cmd_log(host->cmd, host, false, DW_MCI_FLAG_ERROR, status);
host->data_status = pending;
- smp_wmb(); /* drain writebuffer */
+ smp_wmb(); /* drain writebuffer */
set_bit(EVENT_DATA_ERROR, &host->pending_events);
tasklet_schedule(&host->tasklet);
}
del_timer(&host->dto_timer);
mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
+ dw_mci_debug_cmd_log(host->cmd, host, false, DW_MCI_FLAG_DTO, 0);
if (!host->data_status)
host->data_status = pending;
- smp_wmb(); /* drain writebuffer */
+ smp_wmb(); /* drain writebuffer */
if (host->dir_status == DW_MCI_RECV_STATUS) {
if (host->sg != NULL)
dw_mci_read_data_pio(host, true);
spin_lock_irqsave(&host->irq_lock, irqflags);
mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
+ dw_mci_debug_cmd_log(host->cmd, host, false, DW_MCI_FLAG_CD, 0);
dw_mci_cmd_interrupt(host, pending);
spin_unlock_irqrestore(&host->irq_lock, irqflags);
if (pending & SDMMC_INT_CD) {
mci_writel(host, RINTSTS, SDMMC_INT_CD);
+#if 0
dw_mci_handle_cd(host);
+#else
+ queue_work(host->card_workqueue, &host->card_work);
+#endif
}
if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
- mci_writel(host, RINTSTS,
- SDMMC_INT_SDIO(slot->sdio_id));
+ mci_writel(host, RINTSTS, SDMMC_INT_SDIO(slot->sdio_id));
__dw_mci_enable_sdio_irq(slot, 0);
sdio_signal_irq(slot->mmc);
}
if (host->dma_64bit_address == 1) {
pending = mci_readl(host, IDSTS64);
if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
- mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
- SDMMC_IDMAC_INT_RI);
+ mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
host->dma_ops->complete((void *)host);
} else {
pending = mci_readl(host, IDSTS);
if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
- mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
- SDMMC_IDMAC_INT_RI);
+ mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
host->dma_ops->complete((void *)host);
return 0;
}
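+/*
+ * Software timeout handler: fires when no hardware interrupt arrives for
+ * the current request. It dumps the SFRs (except during tuning), fails
+ * the request with -ENOMEDIUM, resets the CIU and FIFO, and returns the
+ * state machine to STATE_IDLE.
+ */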
+static void dw_mci_timeout_timer(unsigned long data)
+{
+ struct dw_mci *host = (struct dw_mci *)data;
+ struct mmc_request *mrq;
+
+ if (host && host->mrq) {
+ host->sw_timeout_chk = true;
+ mrq = host->mrq;
+
+ if (!(mrq->cmd->opcode == MMC_SEND_TUNING_BLOCK ||
+ mrq->cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)) {
+ dev_err(host->dev,
+ "Timeout waiting for hardware interrupt. state = %d\n",
+ host->state);
+ dw_mci_reg_dump(host);
+ }
+ spin_lock(&host->lock);
+
+ host->sg = NULL;
+ host->data = NULL;
+ host->cmd = NULL;
+
+ switch (host->state) {
+ case STATE_IDLE:
+ case STATE_WAITING_CMD11_DONE:
+ break;
+ case STATE_SENDING_CMD11:
+ case STATE_SENDING_CMD:
+ mrq->cmd->error = -ENOMEDIUM;
+ if (!mrq->data)
+ break;
+ /* fall through */
+ case STATE_SENDING_DATA:
+ mrq->data->error = -ENOMEDIUM;
+ dw_mci_stop_dma(host);
+ break;
+ case STATE_DATA_BUSY:
+ case STATE_DATA_ERROR:
+ if (mrq->data->error == -EINPROGRESS)
+ mrq->data->error = -ENOMEDIUM;
+ /* fall through */
+ case STATE_SENDING_STOP:
+ if (mrq->stop)
+ mrq->stop->error = -ENOMEDIUM;
+ break;
+ }
+
+ spin_unlock(&host->lock);
+ dw_mci_ciu_reset(host->dev, host);
+ dw_mci_fifo_reset(host->dev, host);
+ spin_lock(&host->lock);
+ dw_mci_request_end(host, mrq);
+ host->state = STATE_IDLE;
+ spin_unlock(&host->lock);
+ host->sw_timeout_chk = false;
+ }
+}
+
+#ifdef CONFIG_OF
+/* given a slot, find out the device node representing that slot */
+static struct device_node *dw_mci_of_find_slot_node(struct dw_mci_slot *slot)
+{
+ struct device *dev = slot->mmc->parent;
+ struct device_node *np;
+ const __be32 *addr;
+ int len;
+
+ if (!dev || !dev->of_node)
+ return NULL;
+
+ for_each_child_of_node(dev->of_node, np) {
+ addr = of_get_property(np, "reg", &len);
+ if (!addr || (len < sizeof(int)))
+ continue;
+ if (be32_to_cpup(addr) == slot->id)
+ return np;
+ }
+ return NULL;
+}
+
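+/* apply per-slot DT properties; currently only the deprecated disable-wp */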
+static void dw_mci_slot_of_parse(struct dw_mci_slot *slot)
+{
+ struct device_node *np = dw_mci_of_find_slot_node(slot);
+
+ if (!np)
+ return;
+
+ if (of_property_read_bool(np, "disable-wp")) {
+ slot->mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
+ dev_warn(slot->mmc->parent, "Slot quirk 'disable-wp' is deprecated\n");
+ }
+}
+#else /* CONFIG_OF */
+static void dw_mci_slot_of_parse(struct dw_mci_slot *slot)
+{
+}
+#endif /* CONFIG_OF */
+
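+/* external card-detect ISR: defer the sleepable handling to card_workqueue */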
+static irqreturn_t dw_mci_detect_interrupt(int irq, void *dev_id)
+{
+ struct dw_mci *host = dev_id;
+
+ queue_work(host->card_workqueue, &host->card_work);
+
+ return IRQ_HANDLED;
+}
+
static int dw_mci_init_slot(struct dw_mci *host)
{
struct mmc_host *mmc;
struct dw_mci_slot *slot;
+ struct dw_mci_sfr_ram_dump *dump;
int ret;
u32 freq[2];
mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
if (!mmc)
return -ENOMEM;
-
+ dump = devm_kzalloc(host->dev, sizeof(*dump), GFP_KERNEL);
+ if (!dump) {
+ dev_err(host->dev, "failed to allocate memory for SFR dump\n");
+ ret = -ENOMEM;
+ goto err_host_allocated;
+ }
+ host->sfr_dump = dump;
slot = mmc_priv(mmc);
slot->id = 0;
slot->sdio_id = host->sdio_id0 + slot->id;
host->slot = slot;
mmc->ops = &dw_mci_ops;
- if (device_property_read_u32_array(host->dev, "clock-freq-min-max",
- freq, 2)) {
+ if (device_property_read_u32_array(host->dev, "clock-freq-min-max", freq, 2)) {
mmc->f_min = DW_MCI_FREQ_MIN;
mmc->f_max = DW_MCI_FREQ_MAX;
} else {
- dev_info(host->dev,
- "'clock-freq-min-max' property was deprecated.\n");
+ dev_info(host->dev, "'clock-freq-min-max' property was deprecated.\n");
mmc->f_min = freq[0];
mmc->f_max = freq[1];
}
- /*if there are external regulators, get them*/
- ret = mmc_regulator_get_supply(mmc);
- if (ret == -EPROBE_DEFER)
- goto err_host_allocated;
-
+ /* if there are external regulators, get them */
+ if (!(host->quirks & DW_MMC_QUIRK_FIXED_VOLTAGE)) {
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret == -EPROBE_DEFER)
+ goto err_host_allocated;
+ }
if (!mmc->ocr_avail)
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
if (ret)
goto err_host_allocated;
+ /* Process SDIO IRQs through the sdio_irq_work. */
+ if (mmc->caps & MMC_CAP_SDIO_IRQ)
+ mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
+ dw_mci_slot_of_parse(slot);
+
/* Useful defaults if platform data is unset. */
if (host->use_dma == TRANS_MODE_IDMAC) {
mmc->max_segs = host->ring_size;
mmc->max_segs = 64;
mmc->max_blk_size = 65535;
mmc->max_blk_count = 65535;
- mmc->max_req_size =
- mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
} else {
/* TRANS_MODE_PIO */
mmc->max_segs = 64;
- mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
+ mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
mmc->max_blk_count = 512;
- mmc->max_req_size = mmc->max_blk_size *
- mmc->max_blk_count;
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
}
#if defined(CONFIG_DEBUG_FS)
dw_mci_init_debugfs(slot);
#endif
+ /* For argos */
+ dw_mci_transferred_cnt_init(host, mmc);
+
+ /* Card initially undetected */
+ slot->last_detect_state = 0;
return 0;
-err_host_allocated:
+ err_host_allocated:
mmc_free_host(mmc);
return ret;
}
struct device *dev = host->dev;
/*
- * Check tansfer mode from HCON[17:16]
- * Clear the ambiguous description of dw_mmc databook:
- * 2b'00: No DMA Interface -> Actually means using Internal DMA block
- * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
- * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
- * 2b'11: Non DW DMA Interface -> pio only
- * Compared to DesignWare DMA Interface, Generic DMA Interface has a
- * simpler request/acknowledge handshake mechanism and both of them
- * are regarded as external dma master for dw_mmc.
- */
+ * Check transfer mode from HCON[17:16]
+ * Clear the ambiguous description of dw_mmc databook:
+ * 2b'00: No DMA Interface -> Actually means using Internal DMA block
+ * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
+ * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
+ * 2b'11: Non DW DMA Interface -> pio only
+ * Compared to DesignWare DMA Interface, Generic DMA Interface has a
+ * simpler request/acknowledge handshake mechanism and both of them
+ * are regarded as external dma master for dw_mmc.
+ */
host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
if (host->use_dma == DMA_INTERFACE_IDMA) {
host->use_dma = TRANS_MODE_IDMAC;
- } else if (host->use_dma == DMA_INTERFACE_DWDMA ||
- host->use_dma == DMA_INTERFACE_GDMA) {
+ } else if (host->use_dma == DMA_INTERFACE_DWDMA || host->use_dma == DMA_INTERFACE_GDMA) {
host->use_dma = TRANS_MODE_EDMAC;
} else {
goto no_dma;
/* Determine which DMA interface to use */
if (host->use_dma == TRANS_MODE_IDMAC) {
/*
- * Check ADDR_CONFIG bit in HCON to find
- * IDMAC address bus width
- */
+ * Check ADDR_CONFIG bit in HCON to find
+ * IDMAC address bus width
+ */
addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));
if (addr_config == 1) {
/* host supports IDMAC in 64-bit address mode */
host->dma_64bit_address = 1;
- dev_info(host->dev,
- "IDMAC supports 64-bit address mode.\n");
+ dev_info(host->dev, "IDMAC supports 64-bit address mode.\n");
if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
- dma_set_coherent_mask(host->dev,
- DMA_BIT_MASK(64));
+ dma_set_coherent_mask(host->dev, DMA_BIT_MASK(64));
} else {
/* host supports IDMAC in 32-bit address mode */
host->dma_64bit_address = 0;
- dev_info(host->dev,
- "IDMAC supports 32-bit address mode.\n");
+ dev_info(host->dev, "IDMAC supports 32-bit address mode.\n");
}
+ if (host->pdata->desc_sz)
+ host->desc_sz = host->pdata->desc_sz;
+ else
+ host->desc_sz = 1;
+
/* Alloc memory for sg translation */
host->sg_cpu = dmam_alloc_coherent(host->dev,
- DESC_RING_BUF_SZ,
- &host->sg_dma, GFP_KERNEL);
+ host->desc_sz * PAGE_SIZE *
+ MMC_DW_IDMAC_MULTIPLIER, &host->sg_dma,
+ GFP_KERNEL);
+
if (!host->sg_cpu) {
- dev_err(host->dev,
- "%s: could not alloc DMA memory\n",
- __func__);
+ dev_err(host->dev, "%s: could not alloc DMA memory\n", __func__);
goto no_dma;
}
if (host->dma_ops->init && host->dma_ops->start &&
host->dma_ops->stop && host->dma_ops->cleanup) {
if (host->dma_ops->init(host)) {
- dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
- __func__);
+ dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n", __func__);
goto no_dma;
}
} else {
return;
-no_dma:
+ no_dma:
dev_info(host->dev, "Using PIO mode.\n");
host->use_dma = TRANS_MODE_PIO;
}
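+/*
+ * Reset the card interface unit (CIU). The reset is retried while the
+ * card keeps signalling busy (polled for up to 10ms per attempt, 10
+ * attempts); the CIU clock registers must be reprogrammed afterwards,
+ * hence the dw_mci_update_clock() call.
+ */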
+void dw_mci_ciu_reset(struct device *dev, struct dw_mci *host)
+{
+ struct dw_mci_slot *slot = host->slot;
+ int retry = 10;
+ u32 status;
+
+ if (slot) {
+ dw_mci_ctrl_reset(host, SDMMC_CTRL_RESET);
+ /* Check For DATA busy */
+ do {
+ if (!readl_poll_timeout_atomic(host->regs + SDMMC_STATUS, status,
+ !(status & SDMMC_STATUS_BUSY),
+ 1, 10 * USEC_PER_MSEC)) {
+ goto out;
+ }
+ dw_mci_ctrl_reset(host, SDMMC_CTRL_RESET);
+ } while (--retry);
+ out:
+
+ /* After a CTRL reset we need to have CIU set clock registers */
+ dw_mci_update_clock(slot);
+ }
+}
+
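+/* reset the FIFO, retrying up to three times while a DMA request is pending */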
+bool dw_mci_fifo_reset(struct device *dev, struct dw_mci *host)
+{
+ unsigned int ctrl, loop_count = 3;
+ bool result;
+
+ do {
+ result = dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
+ if (!result)
+ break;
+
+ ctrl = mci_readl(host, STATUS);
+ if (!(ctrl & SDMMC_STATUS_DMA_REQ)) {
+ result = dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
+ if (result) {
+ /*
+ * Clear raw interrupts that can no longer be
+ * handled, e.g. FIFO full => RXDR raised
+ */
+ ctrl = mci_readl(host, RINTSTS);
+ ctrl = ctrl & ~(mci_readl(host, MINTSTS));
+ if (ctrl)
+ mci_writel(host, RINTSTS, ctrl);
+
+ return true;
+ }
+ }
+ } while (loop_count--);
+
+ dev_err(dev, "%s: Timeout while resetting host controller after err\n", __func__);
+
+ return false;
+}
+
static void dw_mci_cmd11_timer(unsigned long arg)
{
struct dw_mci *host = (struct dw_mci *)arg;
* justified because in this function we don't actually cancel the
* pending command in the controller--we just assume it will never come.
*/
- pending = mci_readl(host, MINTSTS); /* read-only mask reg */
+ pending = mci_readl(host, MINTSTS); /* read-only mask reg */
if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) {
/* The interrupt should fire; no need to act but we can warn */
dev_warn(host->dev, "Unexpected interrupt latency\n");
tasklet_schedule(&host->tasklet);
break;
default:
- dev_warn(host->dev, "Unexpected command timeout, state %d\n",
- host->state);
+ dev_warn(host->dev, "Unexpected command timeout, state %d\n", host->state);
break;
}
-exit:
+ exit:
spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
}
}
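+/*
+ * Card insert/remove worker (runs from card_workqueue): on a detect-state
+ * change it fails any request in flight with -ENOMEDIUM, powers the slot
+ * down on removal and calls mmc_detect_change() to rescan the bus.
+ */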
+static void dw_mci_work_routine_card(struct work_struct *work)
+{
+ struct dw_mci *host = container_of(work, struct dw_mci, card_work);
+ const struct dw_mci_drv_data *drv_data = host->drv_data;
+ struct dw_mci_slot *slot = host->slot;
+ struct mmc_host *mmc = slot->mmc;
+ struct mmc_request *mrq;
+ int present;
+
+ present = dw_mci_get_cd(mmc);
+ while (present != slot->last_detect_state) {
+ dev_info(&slot->mmc->class_dev, "card %s\n", present ? "inserted" : "removed");
+
+ spin_lock_bh(&host->lock);
+
+ /* Card change detected */
+ slot->last_detect_state = present;
+
+ /* Clean up queue if present */
+ mrq = slot->mrq;
+ if (mrq) {
+ if (mrq == host->mrq) {
+ host->data = NULL;
+ host->cmd = NULL;
+
+ switch (host->state) {
+ case STATE_IDLE:
+ case STATE_WAITING_CMD11_DONE:
+ break;
+ case STATE_SENDING_CMD11:
+ case STATE_SENDING_CMD:
+ mrq->cmd->error = -ENOMEDIUM;
+ if (!mrq->data)
+ break;
+ /* fall through */
+ case STATE_SENDING_DATA:
+ mrq->data->error = -ENOMEDIUM;
+ dw_mci_stop_dma(host);
+ break;
+ case STATE_DATA_BUSY:
+ case STATE_DATA_ERROR:
+ if (mrq->data->error == -EINPROGRESS)
+ mrq->data->error = -ENOMEDIUM;
+ /* fall through */
+ case STATE_SENDING_STOP:
+ if (mrq->stop)
+ mrq->stop->error = -ENOMEDIUM;
+ break;
+ }
+
+ dw_mci_request_end(host, mrq);
+ } else {
+ list_del(&slot->queue_node);
+ mrq->cmd->error = -ENOMEDIUM;
+ if (mrq->data)
+ mrq->data->error = -ENOMEDIUM;
+ if (mrq->stop)
+ mrq->stop->error = -ENOMEDIUM;
+
+ spin_unlock(&host->lock);
+ mmc_request_done(slot->mmc, mrq);
+ spin_lock(&host->lock);
+ }
+ }
+
+ /* Power down slot */
+ if (present == 0)
+ dw_mci_reset(host);
+ spin_unlock_bh(&host->lock);
+ }
+ if (present) {
+ mmc_detect_change(slot->mmc, msecs_to_jiffies(host->pdata->detect_delay_ms));
+ } else {
+ mmc_detect_change(slot->mmc, 0);
+ if (host->pdata->only_once_tune)
+ host->pdata->tuned = false;
+
+ if (host->pdata->quirks & DW_MCI_QUIRK_USE_SSC) {
+ if (drv_data && drv_data->ssclk_control)
+ drv_data->ssclk_control(host, 0);
+ }
+ }
+}
+
#ifdef CONFIG_OF
+static struct dw_mci_of_quirks {
+ char *quirk;
+ int id;
+} of_quirks[] = {
+ { .quirk = "broken-cd", .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION },
+ { .quirk = "disable-wp", .id = DW_MCI_QUIRK_NO_WRITE_PROTECT },
+ { .quirk = "fixed_voltage", .id = DW_MMC_QUIRK_FIXED_VOLTAGE },
+ { .quirk = "card-init-hwacg-ctrl", .id = DW_MCI_QUIRK_HWACG_CTRL },
+ { .quirk = "enable-ulp-mode", .id = DW_MCI_QUIRK_ENABLE_ULP },
+};
+
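+/*
+ * A hypothetical device-tree fragment exercising the quirks above; the
+ * property values are illustrative only:
+ *
+ *	mmc0: mmc@12340000 {
+ *		broken-cd;
+ *		fixed_voltage;
+ *		card-init-hwacg-ctrl;
+ *		ssc-rate = <3>;
+ *		card-detect-delay = <200>;
+ *	};
+ */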
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
struct dw_mci_board *pdata;
struct device *dev = host->dev;
+ struct device_node *np = dev->of_node;
const struct dw_mci_drv_data *drv_data = host->drv_data;
- int ret;
+ int idx, ret;
u32 clock_frequency;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
+ if (!pdata) {
+ dev_err(dev, "could not allocate memory for pdata\n");
return ERR_PTR(-ENOMEM);
+ }
/* find reset controller when exist */
pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset");
if (!device_property_read_u32(dev, "num-slots", &pdata->num_slots))
dev_info(dev, "'num-slots' was deprecated.\n");
+ /* get quirks */
+ for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
+ if (of_get_property(np, of_quirks[idx].quirk, NULL))
+ pdata->quirks |= of_quirks[idx].id;
if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
dev_info(dev,
"fifo-depth property not found, using value of FIFOTH register as default\n");
- device_property_read_u32(dev, "card-detect-delay",
- &pdata->detect_delay_ms);
+ device_property_read_u32(dev, "card-detect-delay", &pdata->detect_delay_ms);
+ if (of_property_read_u32(np, "qos-dvfs-level", &pdata->qos_dvfs_level))
+ host->qos_cntrl = false;
+ else
+ host->qos_cntrl = true;
+ if (!of_property_read_u32(np, "ssc-rate", &pdata->ssc_rate)) {
+ pdata->quirks |= DW_MCI_QUIRK_USE_SSC;
+ }
+ of_property_read_u32(np, "data-timeout", &pdata->data_timeout);
+ of_property_read_u32(np, "hto-timeout", &pdata->hto_timeout);
+ of_property_read_u32(np, "desc-size", &pdata->desc_sz);
device_property_read_u32(dev, "data-addr", &host->data_addr_override);
if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
pdata->bus_hz = clock_frequency;
+ if (of_find_property(np, "only_once_tune", NULL))
+ pdata->only_once_tune = true;
+
if (drv_data && drv_data->parse_dt) {
ret = drv_data->parse_dt(host);
if (ret)
return ERR_PTR(ret);
}
+ if (of_find_property(np, "supports-highspeed", NULL)) {
+ dev_info(dev, "supports-highspeed property is deprecated.\n");
+ pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
+ }
+
+ if (of_find_property(np, "clock-gate", NULL))
+ pdata->use_gate_clock = true;
+
+ if (of_find_property(np, "card-detect-invert", NULL))
+ pdata->use_gpio_invert = true;
+
+ /* caps */
+
+ if (of_find_property(np, "supports-8bit", NULL))
+ pdata->caps |= MMC_CAP_8_BIT_DATA;
+
+ if (of_find_property(np, "supports-4bit", NULL))
+ pdata->caps |= MMC_CAP_4_BIT_DATA;
+
+ if (of_find_property(np, "supports-cmd23", NULL))
+ pdata->caps |= MMC_CAP_CMD23;
+
+ if (of_find_property(np, "supports-erase", NULL))
+ pdata->caps |= MMC_CAP_ERASE;
+
+ if (of_find_property(np, "pm-skip-mmc-resume-init", NULL))
+ pdata->pm_caps |= MMC_PM_SKIP_MMC_RESUME_INIT;
+ if (of_find_property(np, "card-detect-invert-gpio", NULL))
+ pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
+ if (of_find_property(np, "card-detect-gpio", NULL))
+ pdata->cd_type = DW_MCI_CD_GPIO;
+
return pdata;
}
-#else /* CONFIG_OF */
+#else /* CONFIG_OF */
static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
{
return ERR_PTR(-EINVAL);
}
-#endif /* CONFIG_OF */
+#endif /* CONFIG_OF */
static void dw_mci_enable_cd(struct dw_mci *host)
{
if (mmc_gpio_get_cd(host->slot->mmc) < 0) {
spin_lock_irqsave(&host->irq_lock, irqflags);
temp = mci_readl(host, INTMASK);
- temp |= SDMMC_INT_CD;
+ temp |= SDMMC_INT_CD;
mci_writel(host, INTMASK, temp);
spin_unlock_irqrestore(&host->irq_lock, irqflags);
}
{
const struct dw_mci_drv_data *drv_data = host->drv_data;
int width, i, ret = 0;
- u32 fifo_size;
+ u32 fifo_size, msize, tx_wmark, rx_wmark;
if (!host->pdata) {
host->pdata = dw_mci_parse_dt(host);
ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
if (ret)
dev_warn(host->dev,
- "Unable to set bus rate to %uHz\n",
- host->pdata->bus_hz);
+ "Unable to set bus rate to %uHz\n", host->pdata->bus_hz);
}
host->bus_hz = clk_get_rate(host->ciu_clk);
}
if (!host->bus_hz) {
- dev_err(host->dev,
- "Platform data must supply bus speed\n");
+ dev_err(host->dev, "Platform data must supply bus speed\n");
ret = -ENODEV;
goto err_clk_ciu;
}
if (drv_data && drv_data->init) {
ret = drv_data->init(host);
if (ret) {
- dev_err(host->dev,
- "implementation specific init failed\n");
+ dev_err(host->dev, "implementation specific init failed\n");
goto err_clk_ciu;
}
}
- setup_timer(&host->cmd11_timer,
- dw_mci_cmd11_timer, (unsigned long)host);
+ host->quirks = host->pdata->quirks;
+
+#ifdef CONFIG_CPU_IDLE
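+ /*
+ * Mark this IP busy (0) so the SoC cannot enter deep idle while the
+ * host is being set up; it is released (1) on the error paths and
+ * once probing completes.
+ */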
+ host->idle_ip_index = exynos_get_idle_ip_index(dev_name(host->dev));
+ exynos_update_ip_idle_status(host->idle_ip_index, 0);
+#endif
+ if (host->quirks & DW_MCI_QUIRK_HWACG_CTRL) {
+ if (drv_data && drv_data->hwacg_control)
+ drv_data->hwacg_control(host, HWACG_Q_ACTIVE_EN);
+ } else {
+ if (drv_data && drv_data->hwacg_control)
+ drv_data->hwacg_control(host, HWACG_Q_ACTIVE_DIS);
+ }
+
+ if (drv_data && drv_data->access_control_get_dev) {
+ ret = drv_data->access_control_get_dev(host);
+ if (ret == -EPROBE_DEFER)
+ dev_err(host->dev, "%s: Access control device not probed yet.(%d)\n",
+ __func__, ret);
+ else if (ret)
+ dev_err(host->dev, "%s, Fail to get Access control device.(%d)\n",
+ __func__, ret);
+ }
+
+ if (drv_data && drv_data->access_control_sec_cfg) {
+ ret = drv_data->access_control_sec_cfg(host);
+ if (ret)
+ dev_err(host->dev, "%s: Fail to control security config.(%x)\n",
+ __func__, ret);
+ }
+
+ if (drv_data && drv_data->access_control_init) {
+ ret = drv_data->access_control_init(host);
+ if (ret)
+ dev_err(host->dev, "%s: Fail to initialize access control.(%d)\n",
+ __func__, ret);
+ }
+
+ setup_timer(&host->cmd11_timer, dw_mci_cmd11_timer, (unsigned long)host);
- setup_timer(&host->cto_timer,
- dw_mci_cto_timer, (unsigned long)host);
+ setup_timer(&host->cto_timer, dw_mci_cto_timer, (unsigned long)host);
- setup_timer(&host->dto_timer,
- dw_mci_dto_timer, (unsigned long)host);
+ setup_timer(&host->dto_timer, dw_mci_dto_timer, (unsigned long)host);
spin_lock_init(&host->lock);
spin_lock_init(&host->irq_lock);
} else {
/* Check for a reserved value, and warn if it is */
WARN((i != 1),
- "HCON reports a reserved host data width!\n"
- "Defaulting to 32-bit access.\n");
+ "HCON reports a reserved host data width!\n" "Defaulting to 32-bit access.\n");
host->push_data = dw_mci_push_data32;
host->pull_data = dw_mci_pull_data32;
width = 32;
/* Clear the interrupts for the host controller */
mci_writel(host, RINTSTS, 0xFFFFFFFF);
- mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
+ mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
/* Put in max timeout */
mci_writel(host, TMOUT, 0xFFFFFFFF);
fifo_size = host->pdata->fifo_depth;
}
host->fifo_depth = fifo_size;
- host->fifoth_val =
- SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
+
+ WARN_ON(fifo_size < 8);
+
+ /*
+ * HCON[9:7] -> H_DATA_WIDTH
+ * 000 16 bits
+ * 001 32 bits
+ * 010 64 bits
+ *
+ * FIFOTH[30:28] -> DW_DMA_Multiple_Transaction_Size
+ * msize:
+ * 000 1 transfers
+ * 001 4
+ * 010 8
+ * 011 16
+ * 100 32
+ * 101 64
+ * 110 128
+ * 111 256
+ *
+ * AHB Master can support 1/4/8/16 burst in DMA.
+ * So, Max support burst spec is 16 burst.
+ *
+ * msize <= 011(16 burst)
+ * Transaction_Size = msize * H_DATA_WIDTH;
+ * rx_wmark = Transaction_Size - 1;
+ * tx_wmark = fifo_size - Transaction_Size;
+ */
+ msize = host->data_shift;
+ msize &= 7;
+ rx_wmark = ((1 << (msize + 1)) - 1) & 0xfff;
+ tx_wmark = (fifo_size - (1 << (msize + 1))) & 0xfff;
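+ /*
+ * Worked example: a 32-bit H_DATA_WIDTH gives data_shift = 2, so
+ * msize = 2 (8 transfers), rx_wmark = (1 << 3) - 1 = 7 and
+ * tx_wmark = fifo_size - 8.
+ */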
+
+ host->fifoth_val = msize << SDMMC_FIFOTH_DMA_MULTI_TRANS_SIZE;
+ host->fifoth_val |= (rx_wmark << SDMMC_FIFOTH_RX_WMARK) | tx_wmark;
+
mci_writel(host, FIFOTH, host->fifoth_val);
+ dev_info(host->dev, "FIFOTH: 0x %08x", mci_readl(host, FIFOTH));
/* disable clock to CIU */
mci_writel(host, CLKENA, 0);
host->fifo_reg = host->regs + DATA_240A_OFFSET;
tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
+
+ host->card_workqueue = alloc_workqueue("dw-mci-card", WQ_MEM_RECLAIM, 1);
+ if (!host->card_workqueue) {
+ ret = -ENOMEM;
+ goto err_dmaunmap;
+ }
+ INIT_WORK(&host->card_work, dw_mci_work_routine_card);
+
+ /* INT min lock */
+ pm_workqueue = create_freezable_workqueue("dw_mci_clk_ctrl");
+ if (!pm_workqueue) {
+ ret = -ENOMEM;
+ destroy_workqueue(host->card_workqueue);
+ goto err_dmaunmap;
+ }
+
+ INIT_DELAYED_WORK(&host->qos_work, dw_mci_qos_work);
+ pm_qos_add_request(&host->pm_qos_lock, PM_QOS_DEVICE_THROUGHPUT, 0);
+
ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
host->irq_flags, "dw-mci", host);
+
+ setup_timer(&host->timer, dw_mci_timeout_timer, (unsigned long)host);
+ host->sw_timeout_chk = false;
+
if (ret)
- goto err_dmaunmap;
+ goto err_workqueue;
+
+ host->pdata->tuned = false;
+
+ if (host->pdata->num_slots)
+ host->num_slots = host->pdata->num_slots;
+ else
+ host->num_slots = 1;
+
+ if (host->num_slots < 1 || host->num_slots > SDMMC_GET_SLOT_NUM(mci_readl(host, HCON))) {
+ dev_err(host->dev, "Platform data must supply correct num_slots.\n");
+ ret = -ENODEV;
+ goto err_workqueue;
+ }
/*
* Enable interrupts for command done, data over, data empty,
* receive ready and error such as transmit, receive timeout, crc error
*/
mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
- SDMMC_INT_TXDR | SDMMC_INT_RXDR |
- DW_MCI_ERROR_FLAGS);
+ SDMMC_INT_TXDR | SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS);
/* Enable mci interrupt */
mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
/* We need at least one slot to succeed */
ret = dw_mci_init_slot(host);
if (ret) {
- dev_dbg(host->dev, "slot %d init failed\n", i);
+ dev_dbg(host->dev, "slot init failed\n");
goto err_dmaunmap;
}
+ dw_mci_debug_init(host);
+
+ if (drv_data && drv_data->misc_control) {
+ if (host->pdata->cd_type == DW_MCI_CD_GPIO)
+ drv_data->misc_control(host, CTRL_REQUEST_EXT_IRQ, dw_mci_detect_interrupt);
+ }
+
+ if (host->pdata->cd_type == DW_MCI_CD_INTERNAL) {
+ /* Now that slots are all setup, we can enable card detect */
+ dw_mci_enable_cd(host);
+ }
+#ifdef CONFIG_CPU_IDLE
+ exynos_update_ip_idle_status(host->idle_ip_index, 1);
+#endif
- /* Now that slots are all setup, we can enable card detect */
- dw_mci_enable_cd(host);
return 0;
-err_dmaunmap:
+ err_workqueue:
+ destroy_workqueue(host->card_workqueue);
+ destroy_workqueue(pm_workqueue);
+ pm_qos_remove_request(&host->pm_qos_lock);
+
+ err_dmaunmap:
if (host->use_dma && host->dma_ops->exit)
host->dma_ops->exit(host);
if (!IS_ERR(host->pdata->rstc))
reset_control_assert(host->pdata->rstc);
-err_clk_ciu:
+ err_clk_ciu:
clk_disable_unprepare(host->ciu_clk);
-err_clk_biu:
+ err_clk_biu:
clk_disable_unprepare(host->biu_clk);
+#ifdef CONFIG_CPU_IDLE
+ exynos_update_ip_idle_status(host->idle_ip_index, 1);
+#endif
return ret;
}
+
EXPORT_SYMBOL(dw_mci_probe);
void dw_mci_remove(struct dw_mci *host)
{
+ const struct dw_mci_drv_data *drv_data = host->drv_data;
+
dev_dbg(host->dev, "remove slot\n");
if (host->slot)
dw_mci_cleanup_slot(host->slot);
mci_writel(host, RINTSTS, 0xFFFFFFFF);
- mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
+ mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
/* disable clock to CIU */
mci_writel(host, CLKENA, 0);
mci_writel(host, CLKSRC, 0);
+ del_timer_sync(&host->timer);
+ destroy_workqueue(host->card_workqueue);
+ destroy_workqueue(pm_workqueue);
+ pm_qos_remove_request(&host->pm_qos_lock);
+
+ if (host->pdata->quirks & DW_MCI_QUIRK_USE_SSC) {
+ if (drv_data && drv_data->ssclk_control)
+ drv_data->ssclk_control(host, 0);
+ }
+
if (host->use_dma && host->dma_ops->exit)
host->dma_ops->exit(host);
clk_disable_unprepare(host->ciu_clk);
clk_disable_unprepare(host->biu_clk);
}
-EXPORT_SYMBOL(dw_mci_remove);
-
+EXPORT_SYMBOL(dw_mci_remove);
#ifdef CONFIG_PM
int dw_mci_runtime_suspend(struct device *dev)
clk_disable_unprepare(host->ciu_clk);
if (host->slot &&
- (mmc_can_gpio_cd(host->slot->mmc) ||
- !mmc_card_is_removable(host->slot->mmc)))
+ (mmc_can_gpio_cd(host->slot->mmc) || !mmc_card_is_removable(host->slot->mmc)))
clk_disable_unprepare(host->biu_clk);
return 0;
}
+
EXPORT_SYMBOL(dw_mci_runtime_suspend);
int dw_mci_runtime_resume(struct device *dev)
{
int ret = 0;
struct dw_mci *host = dev_get_drvdata(dev);
+ const struct dw_mci_drv_data *drv_data = host->drv_data;
if (host->slot &&
- (mmc_can_gpio_cd(host->slot->mmc) ||
- !mmc_card_is_removable(host->slot->mmc))) {
+ (mmc_can_gpio_cd(host->slot->mmc) || !mmc_card_is_removable(host->slot->mmc))) {
ret = clk_prepare_enable(host->biu_clk);
if (ret)
return ret;
if (host->use_dma && host->dma_ops->init)
host->dma_ops->init(host);
+ if (host->quirks & DW_MCI_QUIRK_HWACG_CTRL) {
+ if (drv_data && drv_data->hwacg_control)
+ drv_data->hwacg_control(host, HWACG_Q_ACTIVE_EN);
+ } else {
+ if (drv_data && drv_data->hwacg_control)
+ drv_data->hwacg_control(host, HWACG_Q_ACTIVE_DIS);
+ }
+
+ if (drv_data && drv_data->access_control_sec_cfg) {
+ ret = drv_data->access_control_sec_cfg(host);
+ if (ret)
+ dev_err(host->dev, "%s: Fail to control security config.(%x)\n",
+ __func__, ret);
+ }
+
+ if (drv_data && drv_data->access_control_resume) {
+ ret = drv_data->access_control_resume(host);
+ if (ret)
+ dev_err(host->dev, "%s: Fail to resume access control.(%d)\n",
+ __func__, ret);
+ }
+
/*
* Restore the initial value at FIFOTH register
* And Invalidate the prev_blksz with zero
*/
- mci_writel(host, FIFOTH, host->fifoth_val);
- host->prev_blksz = 0;
+ mci_writel(host, FIFOTH, host->fifoth_val);
+ host->prev_blksz = 0;
/* Put in max timeout */
mci_writel(host, TMOUT, 0xFFFFFFFF);
mci_writel(host, RINTSTS, 0xFFFFFFFF);
mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
- SDMMC_INT_TXDR | SDMMC_INT_RXDR |
- DW_MCI_ERROR_FLAGS);
+ SDMMC_INT_TXDR | SDMMC_INT_RXDR | DW_MCI_ERROR_FLAGS);
mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
-
- if (host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
+ if (host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER ||
+ host->slot->mmc->pm_caps & MMC_PM_SKIP_MMC_RESUME_INIT) {
dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);
- /* Force setup bus to guarantee available clock output */
- dw_mci_setup_bus(host->slot, true);
+ /* Force setup bus to guarantee available clock output */
+ dw_mci_setup_bus(host->slot, true);
+ }
- /* Now that slots are all setup, we can enable card detect */
- dw_mci_enable_cd(host);
+ if (host->pdata->cd_type == DW_MCI_CD_INTERNAL) {
+ /* Now that slots are all setup, we can enable card detect */
+ dw_mci_enable_cd(host);
+ }
return 0;
-err:
+ err:
if (host->slot &&
- (mmc_can_gpio_cd(host->slot->mmc) ||
- !mmc_card_is_removable(host->slot->mmc)))
+ (mmc_can_gpio_cd(host->slot->mmc) || !mmc_card_is_removable(host->slot->mmc)))
clk_disable_unprepare(host->biu_clk);
return ret;
}
+
EXPORT_SYMBOL(dw_mci_runtime_resume);
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM */
static int __init dw_mci_init(void)
{