drivers/mmc/host/dw_mmc-exynos.c
/*
 * Exynos Specific Extensions for Synopsys DW Multimedia Card Interface driver
 *
 * Copyright (C) 2012, Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/smc.h>

#include "dw_mmc.h"
#include "dw_mmc-pltfm.h"
#include "dw_mmc-exynos.h"

extern int cal_pll_mmc_set_ssc(unsigned int mfr, unsigned int mrr, unsigned int ssc_on);
extern int cal_pll_mmc_check(void);

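/*
 * Register/state dump helpers. Each dev_err() below also latches the value
 * it just read into host->sfr_dump, presumably so a later ramdump or bug
 * report still carries the last observed register state.
 */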
static void dw_mci_exynos_register_dump(struct dw_mci *host)
{
	dev_err(host->dev, ": EMMCP_BASE: 0x%08x\n",
		host->sfr_dump->fmp_emmcp_base = mci_readl(host, EMMCP_BASE));
	dev_err(host->dev, ": MPSECURITY: 0x%08x\n",
		host->sfr_dump->mpsecurity = mci_readl(host, MPSECURITY));
	dev_err(host->dev, ": MPSTAT: 0x%08x\n",
		host->sfr_dump->mpstat = mci_readl(host, MPSTAT));
	dev_err(host->dev, ": MPSBEGIN: 0x%08x\n",
		host->sfr_dump->mpsbegin = mci_readl(host, MPSBEGIN0));
	dev_err(host->dev, ": MPSEND: 0x%08x\n",
		host->sfr_dump->mpsend = mci_readl(host, MPSEND0));
	dev_err(host->dev, ": MPSCTRL: 0x%08x\n",
		host->sfr_dump->mpsctrl = mci_readl(host, MPSCTRL0));
	dev_err(host->dev, ": HS400_DQS_EN: 0x%08x\n",
		host->sfr_dump->hs400_rdqs_en = mci_readl(host, HS400_DQS_EN));
	dev_err(host->dev, ": HS400_ASYNC_FIFO_CTRL: 0x%08x\n",
		host->sfr_dump->hs400_acync_fifo_ctrl = mci_readl(host, HS400_ASYNC_FIFO_CTRL));
	dev_err(host->dev, ": HS400_DLINE_CTRL: 0x%08x\n",
		host->sfr_dump->hs400_dline_ctrl = mci_readl(host, HS400_DLINE_CTRL));
}

void dw_mci_reg_dump(struct dw_mci *host)
{
	u32 reg;

	dev_err(host->dev, ": ============== REGISTER DUMP ==============\n");
	dev_err(host->dev, ": CTRL: 0x%08x\n", host->sfr_dump->contrl = mci_readl(host, CTRL));
	dev_err(host->dev, ": PWREN: 0x%08x\n", host->sfr_dump->pwren = mci_readl(host, PWREN));
	dev_err(host->dev, ": CLKDIV: 0x%08x\n",
		host->sfr_dump->clkdiv = mci_readl(host, CLKDIV));
	dev_err(host->dev, ": CLKSRC: 0x%08x\n",
		host->sfr_dump->clksrc = mci_readl(host, CLKSRC));
	dev_err(host->dev, ": CLKENA: 0x%08x\n",
		host->sfr_dump->clkena = mci_readl(host, CLKENA));
	dev_err(host->dev, ": TMOUT: 0x%08x\n", host->sfr_dump->tmout = mci_readl(host, TMOUT));
	dev_err(host->dev, ": CTYPE: 0x%08x\n", host->sfr_dump->ctype = mci_readl(host, CTYPE));
	dev_err(host->dev, ": BLKSIZ: 0x%08x\n",
		host->sfr_dump->blksiz = mci_readl(host, BLKSIZ));
	dev_err(host->dev, ": BYTCNT: 0x%08x\n",
		host->sfr_dump->bytcnt = mci_readl(host, BYTCNT));
	dev_err(host->dev, ": INTMSK: 0x%08x\n",
		host->sfr_dump->intmask = mci_readl(host, INTMASK));
	dev_err(host->dev, ": CMDARG: 0x%08x\n",
		host->sfr_dump->cmdarg = mci_readl(host, CMDARG));
	dev_err(host->dev, ": CMD: 0x%08x\n", host->sfr_dump->cmd = mci_readl(host, CMD));
	dev_err(host->dev, ": RESP0: 0x%08x\n", mci_readl(host, RESP0));
	dev_err(host->dev, ": RESP1: 0x%08x\n", mci_readl(host, RESP1));
	dev_err(host->dev, ": RESP2: 0x%08x\n", mci_readl(host, RESP2));
	dev_err(host->dev, ": RESP3: 0x%08x\n", mci_readl(host, RESP3));
	dev_err(host->dev, ": MINTSTS: 0x%08x\n",
		host->sfr_dump->mintsts = mci_readl(host, MINTSTS));
	dev_err(host->dev, ": RINTSTS: 0x%08x\n",
		host->sfr_dump->rintsts = mci_readl(host, RINTSTS));
	dev_err(host->dev, ": STATUS: 0x%08x\n",
		host->sfr_dump->status = mci_readl(host, STATUS));
	dev_err(host->dev, ": FIFOTH: 0x%08x\n",
		host->sfr_dump->fifoth = mci_readl(host, FIFOTH));
	dev_err(host->dev, ": CDETECT: 0x%08x\n", mci_readl(host, CDETECT));
	dev_err(host->dev, ": WRTPRT: 0x%08x\n", mci_readl(host, WRTPRT));
	dev_err(host->dev, ": GPIO: 0x%08x\n", mci_readl(host, GPIO));
	dev_err(host->dev, ": TCBCNT: 0x%08x\n",
		host->sfr_dump->tcbcnt = mci_readl(host, TCBCNT));
	dev_err(host->dev, ": TBBCNT: 0x%08x\n",
		host->sfr_dump->tbbcnt = mci_readl(host, TBBCNT));
	dev_err(host->dev, ": DEBNCE: 0x%08x\n", mci_readl(host, DEBNCE));
	dev_err(host->dev, ": USRID: 0x%08x\n", mci_readl(host, USRID));
	dev_err(host->dev, ": VERID: 0x%08x\n", mci_readl(host, VERID));
	dev_err(host->dev, ": HCON: 0x%08x\n", mci_readl(host, HCON));
	dev_err(host->dev, ": UHS_REG: 0x%08x\n",
		host->sfr_dump->uhs_reg = mci_readl(host, UHS_REG));
	dev_err(host->dev, ": BMOD: 0x%08x\n", host->sfr_dump->bmod = mci_readl(host, BMOD));
	dev_err(host->dev, ": PLDMND: 0x%08x\n", mci_readl(host, PLDMND));
	dev_err(host->dev, ": DBADDRL: 0x%08x\n",
		host->sfr_dump->dbaddrl = mci_readl(host, DBADDRL));
	dev_err(host->dev, ": DBADDRU: 0x%08x\n",
		host->sfr_dump->dbaddru = mci_readl(host, DBADDRU));
	dev_err(host->dev, ": DSCADDRL: 0x%08x\n",
		host->sfr_dump->dscaddrl = mci_readl(host, DSCADDRL));
	dev_err(host->dev, ": DSCADDRU: 0x%08x\n",
		host->sfr_dump->dscaddru = mci_readl(host, DSCADDRU));
	dev_err(host->dev, ": BUFADDR: 0x%08x\n",
		host->sfr_dump->bufaddr = mci_readl(host, BUFADDR));
	dev_err(host->dev, ": BUFADDRU: 0x%08x\n",
		host->sfr_dump->bufaddru = mci_readl(host, BUFADDRU));
	dev_err(host->dev, ": DBADDR: 0x%08x\n",
		host->sfr_dump->dbaddr = mci_readl(host, DBADDR));
	dev_err(host->dev, ": DSCADDR: 0x%08x\n",
		host->sfr_dump->dscaddr = mci_readl(host, DSCADDR));
	dev_err(host->dev, ": BUFADDR: 0x%08x\n",
		host->sfr_dump->bufaddr = mci_readl(host, BUFADDR));
	dev_err(host->dev, ": CLKSEL: 0x%08x\n",
		host->sfr_dump->clksel = mci_readl(host, CLKSEL));
	dev_err(host->dev, ": IDSTS: 0x%08x\n", mci_readl(host, IDSTS));
	dev_err(host->dev, ": IDSTS64: 0x%08x\n",
		host->sfr_dump->idsts64 = mci_readl(host, IDSTS64));
	dev_err(host->dev, ": IDINTEN: 0x%08x\n", mci_readl(host, IDINTEN));
	dev_err(host->dev, ": IDINTEN64: 0x%08x\n",
		host->sfr_dump->idinten64 = mci_readl(host, IDINTEN64));
	dev_err(host->dev, ": RESP_TAT: 0x%08x\n", mci_readl(host, RESP_TAT));
	dev_err(host->dev, ": FORCE_CLK_STOP: 0x%08x\n",
		host->sfr_dump->force_clk_stop = mci_readl(host, FORCE_CLK_STOP));
	dev_err(host->dev, ": CDTHRCTL: 0x%08x\n", mci_readl(host, CDTHRCTL));
	dw_mci_exynos_register_dump(host);
	dev_err(host->dev, ": ============== STATUS DUMP ================\n");
	dev_err(host->dev, ": cmd_status: 0x%08x\n",
		host->sfr_dump->cmd_status = host->cmd_status);
	dev_err(host->dev, ": data_status: 0x%08x\n",
		host->sfr_dump->data_status = host->data_status);
	dev_err(host->dev, ": pending_events: 0x%08lx\n",
		host->sfr_dump->pending_events = host->pending_events);
	dev_err(host->dev, ": completed_events:0x%08lx\n",
		host->sfr_dump->completed_events = host->completed_events);
	dev_err(host->dev, ": state: %d\n", host->sfr_dump->host_state = host->state);
	dev_err(host->dev, ": gate-clk: %s\n",
		atomic_read(&host->ciu_clk_cnt) ? "enable" : "disable");
	dev_err(host->dev, ": ciu_en_win: %d\n", atomic_read(&host->ciu_en_win));
	reg = mci_readl(host, CMD);
	dev_err(host->dev, ": ================= CMD REG =================\n");
	if ((reg >> 9) & 0x1) {
		dev_err(host->dev, ": read/write : %s\n",
			(reg & (0x1 << 10)) ? "write" : "read");
		dev_err(host->dev, ": data expected : %d\n", (reg >> 9) & 0x1);
	}
	dev_err(host->dev, ": cmd index : %d\n",
		host->sfr_dump->cmd_index = ((reg >> 0) & 0x3f));
	reg = mci_readl(host, STATUS);
	dev_err(host->dev, ": ================ STATUS REG ===============\n");
	dev_err(host->dev, ": fifocount : %d\n",
		host->sfr_dump->fifo_count = ((reg >> 17) & 0x1fff));
	dev_err(host->dev, ": response index : %d\n", (reg >> 11) & 0x3f);
	dev_err(host->dev, ": data state mc busy: %d\n", (reg >> 10) & 0x1);
	dev_err(host->dev, ": data busy : %d\n",
		host->sfr_dump->data_busy = ((reg >> 9) & 0x1));
	dev_err(host->dev, ": data 3 state : %d\n",
		host->sfr_dump->data_3_state = ((reg >> 8) & 0x1));
	dev_err(host->dev, ": command fsm state : %d\n", (reg >> 4) & 0xf);
	dev_err(host->dev, ": fifo full : %d\n", (reg >> 3) & 0x1);
	dev_err(host->dev, ": fifo empty : %d\n", (reg >> 2) & 0x1);
	dev_err(host->dev, ": fifo tx watermark : %d\n",
		host->sfr_dump->fifo_tx_watermark = ((reg >> 1) & 0x1));
	dev_err(host->dev, ": fifo rx watermark : %d\n",
		host->sfr_dump->fifo_rx_watermark = ((reg >> 0) & 0x1));
	dev_err(host->dev, ": ===========================================\n");
}

/* Variations in Exynos specific dw-mshc controller */
enum dw_mci_exynos_type {
	DW_MCI_TYPE_EXYNOS,
};

static struct dw_mci_exynos_compatible {
	char *compatible;
	enum dw_mci_exynos_type ctrl_type;
} exynos_compat[] = {
	{
		.compatible = "samsung,exynos-dw-mshc",
		.ctrl_type = DW_MCI_TYPE_EXYNOS,
	},
};

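/*
 * CLKSEL's divider field (bits 26:24 in the Exynos layout, extracted by
 * SDMMC_CLKSEL_GET_DIV()) holds the cclk_in divider minus one, hence the
 * "+ 1" below. find_median_of_16bits() reads the same field as "divratio".
 */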
static inline u8 dw_mci_exynos_get_ciu_div(struct dw_mci *host)
{
	return SDMMC_CLKSEL_GET_DIV(mci_readl(host, CLKSEL)) + 1;
}

static int dw_mci_exynos_priv_init(struct dw_mci *host)
{
	struct dw_mci_exynos_priv_data *priv = host->priv;

	priv->saved_strobe_ctrl = mci_readl(host, HS400_DLINE_CTRL);
	priv->saved_dqs_en = mci_readl(host, HS400_DQS_EN);
	priv->saved_dqs_en |= AXI_NON_BLOCKING_WR;
	mci_writel(host, HS400_DQS_EN, priv->saved_dqs_en);
	if (!priv->dqs_delay)
		priv->dqs_delay = DQS_CTRL_GET_RD_DELAY(priv->saved_strobe_ctrl);
#if defined(CONFIG_MMC_DW_64BIT_DESC)
	if (priv->voltage_int_extra != 0) {
		u32 reg = 0;

		reg = mci_readl(host, AXI_BURST_LEN);
		reg &= ~(0x7 << 24);
		reg |= (priv->voltage_int_extra << 24);
		mci_writel(host, AXI_BURST_LEN, reg);
	}
#endif
	return 0;
}

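/*
 * Spread-spectrum clocking hook. The body is currently compiled out with
 * "#if 0"; when enabled it programs SSC through cal_pll_mmc_set_ssc()
 * according to host->pdata->ssc_rate.
 */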
static void dw_mci_ssclk_control(struct dw_mci *host, int enable)
{
#if 0
	if (host->pdata->quirks & DW_MCI_QUIRK_USE_SSC) {
		u32 err;

		if (enable && cal_pll_mmc_check() == false) {
			if (host->pdata->ssc_rate > 8) {
				dev_info(host->dev, "invalid SSC rate value.\n");
			} else {
				err = cal_pll_mmc_set_ssc(12, host->pdata->ssc_rate, 1);
				if (err)
					dev_info(host->dev, "SSC set fail.\n");
				else
					dev_info(host->dev, "SSC set enable.\n");
			}
		} else if (!enable && cal_pll_mmc_check() == true) {
			err = cal_pll_mmc_set_ssc(0, 0, 0);
			if (err)
				dev_info(host->dev, "SSC set fail.\n");
			else
				dev_info(host->dev, "SSC set disable.\n");
		}
	}
#endif
}

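/*
 * Apply a CLKSEL timing value while preserving everything outside the
 * timing mask. Bits 30 and 19 are the "ultra low power" bits that
 * dw_mci_exynos_set_ios() sets for HS400(-ES); for any other speed mode
 * they are force-cleared here.
 */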
static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
{
	u32 clksel;

	clksel = mci_readl(host, CLKSEL);
	clksel = (clksel & ~SDMMC_CLKSEL_TIMING_MASK) | timing;

	if (!((host->pdata->io_mode == MMC_TIMING_MMC_HS400) ||
	      (host->pdata->io_mode == MMC_TIMING_MMC_HS400_ES)))
		clksel &= ~(BIT(30) | BIT(19));

	mci_writel(host, CLKSEL, clksel);
}

#ifdef CONFIG_PM
static int dw_mci_exynos_runtime_resume(struct device *dev)
{
	return dw_mci_runtime_resume(dev);
}

/**
 * dw_mci_exynos_resume_noirq - Exynos-specific resume code
 *
 * On exynos5420 there is a silicon errata that will sometimes leave the
 * WAKEUP_INT bit in the CLKSEL register asserted. This bit is 1 to indicate
 * that it fired and we can clear it by writing a 1 back. Clear it to prevent
 * interrupts from going off constantly.
 *
 * We run this code on all exynos variants because it doesn't hurt.
 */

static int dw_mci_exynos_resume_noirq(struct device *dev)
{
	struct dw_mci *host = dev_get_drvdata(dev);
	u32 clksel;

	clksel = mci_readl(host, CLKSEL);

	if (clksel & SDMMC_CLKSEL_WAKEUP_INT)
		mci_writel(host, CLKSEL, clksel);

	return 0;
}
#else
#define dw_mci_exynos_resume_noirq NULL
#endif /* CONFIG_PM */

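/*
 * Toggle MMC_HWACG_CONTROL in FORCE_CLK_STOP, which appears to gate the
 * hardware auto clock gating (HWACG) behaviour for the card-interrupt path;
 * host->qactive_check mirrors the state that was last requested.
 */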
static void dw_mci_card_int_hwacg_ctrl(struct dw_mci *host, u32 flag)
{
	u32 reg;

	reg = mci_readl(host, FORCE_CLK_STOP);
	if (flag == HWACG_Q_ACTIVE_EN) {
		reg |= MMC_HWACG_CONTROL;
		host->qactive_check = HWACG_Q_ACTIVE_EN;
	} else {
		reg &= ~(MMC_HWACG_CONTROL);
		host->qactive_check = HWACG_Q_ACTIVE_DIS;
	}
	mci_writel(host, FORCE_CLK_STOP, reg);
}

static void dw_mci_exynos_config_hs400(struct dw_mci *host, u32 timing)
{
	struct dw_mci_exynos_priv_data *priv = host->priv;
	u32 dqs, strobe;

	/*
	 * Not supported to configure register
	 * related to HS400
	 */

	dqs = priv->saved_dqs_en;
	strobe = priv->saved_strobe_ctrl;

	if (timing == MMC_TIMING_MMC_HS400 || timing == MMC_TIMING_MMC_HS400_ES) {
		dqs &= ~(DWMCI_TXDT_CRC_TIMER_SET(0xFF, 0xFF));
		dqs |= (DWMCI_TXDT_CRC_TIMER_SET(priv->hs400_tx_t_fastlimit,
						 priv->hs400_tx_t_initval) | DWMCI_RDDQS_EN |
			DWMCI_AXI_NON_BLOCKING_WRITE);
		if (host->pdata->quirks & DW_MCI_QUIRK_ENABLE_ULP) {
			if (priv->delay_line || priv->tx_delay_line)
				strobe = DWMCI_WD_DQS_DELAY_CTRL(priv->tx_delay_line) |
					 DWMCI_FIFO_CLK_DELAY_CTRL(0x2) |
					 DWMCI_RD_DQS_DELAY_CTRL(priv->delay_line);
			else
				strobe = DWMCI_FIFO_CLK_DELAY_CTRL(0x2) |
					 DWMCI_RD_DQS_DELAY_CTRL(90);
		} else {
			if (priv->delay_line)
				strobe = DWMCI_FIFO_CLK_DELAY_CTRL(0x2) |
					 DWMCI_RD_DQS_DELAY_CTRL(priv->delay_line);
			else
				strobe = DWMCI_FIFO_CLK_DELAY_CTRL(0x2) |
					 DWMCI_RD_DQS_DELAY_CTRL(90);
		}
		dqs |= (DATA_STROBE_EN | DWMCI_AXI_NON_BLOCKING_WRITE);
		if (timing == MMC_TIMING_MMC_HS400_ES)
			dqs |= DWMCI_RESP_RCLK_MODE;
	} else {
		dqs &= ~DATA_STROBE_EN;
	}

	mci_writel(host, HS400_DQS_EN, dqs);
	mci_writel(host, HS400_DLINE_CTRL, strobe);
}

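/*
 * Illustrative numbers (assumed, not taken from any DT): with a requested
 * cclk_in of 200 MHz and a CLKSEL divider of 4, clk_set_rate() below asks
 * for 800 MHz on ciu_clk and bus_hz ends up as the achieved rate divided
 * by 4.
 */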
static void dw_mci_exynos_adjust_clock(struct dw_mci *host, unsigned int wanted)
{
	struct dw_mci_exynos_priv_data *priv = host->priv;
	unsigned long actual;
	u8 div;
	int ret;
	u32 clock;

	/*
	 * Don't care if wanted clock is zero or
	 * ciu clock is unavailable
	 */
	if (!wanted || IS_ERR(host->ciu_clk))
		return;

	/* Guaranteed minimum frequency for cclkin */
	if (wanted < EXYNOS_CCLKIN_MIN)
		wanted = EXYNOS_CCLKIN_MIN;

	div = dw_mci_exynos_get_ciu_div(host);

	if (wanted == priv->cur_speed) {
		clock = clk_get_rate(host->ciu_clk);
		if (clock == priv->cur_speed * div)
			return;
	}

	ret = clk_set_rate(host->ciu_clk, wanted * div);
	if (ret)
		dev_warn(host->dev, "failed to set clk-rate %u error: %d\n", wanted * div, ret);
	actual = clk_get_rate(host->ciu_clk);
	host->bus_hz = actual / div;
	priv->cur_speed = wanted;
	host->current_speed = 0;
}

static void dw_mci_exynos_set_ios(struct dw_mci *host, struct mmc_ios *ios)
{
	struct dw_mci_exynos_priv_data *priv = host->priv;
	unsigned int wanted = ios->clock;
	u32 *clk_tbl = priv->ref_clk;
	u32 timing = ios->timing, clksel;
	u32 cclkin;

	cclkin = clk_tbl[timing];
	host->pdata->io_mode = timing;
	if (host->bus_hz != cclkin)
		wanted = cclkin;

	switch (timing) {
	case MMC_TIMING_MMC_HS400:
	case MMC_TIMING_MMC_HS400_ES:
		if (host->pdata->quirks & DW_MCI_QUIRK_ENABLE_ULP) {
			clksel = SDMMC_CLKSEL_UP_SAMPLE(priv->hs400_ulp_timing, priv->tuned_sample);
			clksel |= (BIT(30) | BIT(19));	/* ultra low powermode on */
		} else {
			clksel = SDMMC_CLKSEL_UP_SAMPLE(priv->hs400_timing, priv->tuned_sample);
			clksel &= ~(BIT(30) | BIT(19));	/* ultra low powermode off */
			wanted <<= 1;
		}
		if (host->pdata->is_fine_tuned)
			clksel |= BIT(6);
		break;
	case MMC_TIMING_MMC_DDR52:
	case MMC_TIMING_UHS_DDR50:
		clksel = priv->ddr_timing;
		/* Should be double rate for DDR mode */
		if (ios->bus_width == MMC_BUS_WIDTH_8)
			wanted <<= 1;
		break;
	case MMC_TIMING_MMC_HS200:
		clksel = SDMMC_CLKSEL_UP_SAMPLE(priv->hs200_timing, priv->tuned_sample);
		break;
	case MMC_TIMING_UHS_SDR104:
		if (priv->sdr104_timing)
			clksel = SDMMC_CLKSEL_UP_SAMPLE(priv->sdr104_timing, priv->tuned_sample);
		else {
			dev_info(host->dev, "SDR104 timing not set, falling back to SDR timing\n");
			clksel = SDMMC_CLKSEL_UP_SAMPLE(priv->sdr_timing, priv->tuned_sample);
		}
		dw_mci_ssclk_control(host, 1);
		break;
	case MMC_TIMING_UHS_SDR50:
		if (priv->sdr50_timing)
			clksel = SDMMC_CLKSEL_UP_SAMPLE(priv->sdr50_timing, priv->tuned_sample);
		else {
			dev_info(host->dev, "SDR50 timing not set, falling back to SDR timing\n");
			clksel = SDMMC_CLKSEL_UP_SAMPLE(priv->sdr_timing, priv->tuned_sample);
		}
		dw_mci_ssclk_control(host, 1);
		break;
	default:
		clksel = priv->sdr_timing;
	}

	host->cclk_in = wanted;

	/* Set clock timing for the requested speed mode */
	dw_mci_exynos_set_clksel_timing(host, clksel);

	/* Configure setting for HS400 */
	dw_mci_exynos_config_hs400(host, timing);

	/* Configure clock rate */
	dw_mci_exynos_adjust_clock(host, wanted);
}

#ifndef MHZ
#define MHZ (1000 * 1000)
#endif

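/*
 * Device-tree parsing. The property names below ("num-ref-clks",
 * "ciu_clkin", "samsung,dw-mshc-*-timing", ...) are the ones this function
 * actually reads; the node sketched here is illustrative only, with made-up
 * values (the real ones live in the board DT):
 *
 *	mmc_0: dwmmc0@13500000 {
 *		num-ref-clks = <12>;
 *		ciu_clkin = <25 50 50 25 50 100 200 50 50 200 200 200>;
 *		samsung,dw-mshc-ciu-div = <3>;
 *		samsung,dw-mshc-sdr-timing = <3 0 4 0>;
 *		samsung,dw-mshc-ddr-timing = <3 0 4 2>;
 *	};
 *
 * ciu_clkin entries at or below MHZ are interpreted as MHz and scaled up.
 */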
static int dw_mci_exynos_parse_dt(struct dw_mci *host)
{
	struct dw_mci_exynos_priv_data *priv;
	struct device_node *np = host->dev->of_node;
	u32 timing[4];
	u32 div = 0, voltage_int_extra = 0;
	int idx;
	u32 ref_clk_size;
	u32 *ref_clk;
	u32 *ciu_clkin_values = NULL;
	int idx_ref;
	int ret = 0;
	int id = 0, i;

	priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(host->dev, "mem alloc failed for private data\n");
		return -ENOMEM;
	}

	for (idx = 0; idx < ARRAY_SIZE(exynos_compat); idx++) {
		if (of_device_is_compatible(np, exynos_compat[idx].compatible))
			priv->ctrl_type = exynos_compat[idx].ctrl_type;
	}

	if (of_property_read_u32(np, "num-ref-clks", &ref_clk_size)) {
		dev_err(host->dev, "Getting the number of reference clocks failed\n");
		ret = -ENODEV;
		goto err_ref_clk;
	}

	ref_clk = devm_kzalloc(host->dev, ref_clk_size * sizeof(*ref_clk), GFP_KERNEL);
	if (!ref_clk) {
		dev_err(host->dev, "Mem alloc failed for reference clock table\n");
		ret = -ENOMEM;
		goto err_ref_clk;
	}

	ciu_clkin_values = devm_kzalloc(host->dev,
					ref_clk_size * sizeof(*ciu_clkin_values), GFP_KERNEL);

	if (!ciu_clkin_values) {
		dev_err(host->dev, "Mem alloc failed for temporary clock values\n");
		ret = -ENOMEM;
		goto err_ref_clk;
	}
	if (of_property_read_u32_array(np, "ciu_clkin", ciu_clkin_values, ref_clk_size)) {
		dev_err(host->dev, "Getting ciu_clkin values failed\n");
		ret = -ENOMEM;
		goto err_ref_clk;
	}

	for (idx_ref = 0; idx_ref < ref_clk_size; idx_ref++, ref_clk++, ciu_clkin_values++) {
		if (*ciu_clkin_values > MHZ)
			*(ref_clk) = (*ciu_clkin_values);
		else
			*(ref_clk) = (*ciu_clkin_values) * MHZ;
	}

	ref_clk -= ref_clk_size;
	ciu_clkin_values -= ref_clk_size;
	priv->ref_clk = ref_clk;

	if (of_get_property(np, "card-detect", NULL))
		priv->cd_gpio = of_get_named_gpio(np, "card-detect", 0);
	else
		priv->cd_gpio = -1;

	/* Swapping clock drive strength */
	of_property_read_u32(np, "clk-drive-number", &priv->clk_drive_number);

	priv->pinctrl = devm_pinctrl_get(host->dev);

	if (IS_ERR(priv->pinctrl)) {
		priv->pinctrl = NULL;
	} else {
		priv->clk_drive_base = pinctrl_lookup_state(priv->pinctrl, "default");
		priv->clk_drive_str[0] = pinctrl_lookup_state(priv->pinctrl, "fast-slew-rate-1x");
		priv->clk_drive_str[1] = pinctrl_lookup_state(priv->pinctrl, "fast-slew-rate-2x");
		priv->clk_drive_str[2] = pinctrl_lookup_state(priv->pinctrl, "fast-slew-rate-3x");
		priv->clk_drive_str[3] = pinctrl_lookup_state(priv->pinctrl, "fast-slew-rate-4x");
		priv->clk_drive_str[4] = pinctrl_lookup_state(priv->pinctrl, "fast-slew-rate-5x");
		priv->clk_drive_str[5] = pinctrl_lookup_state(priv->pinctrl, "fast-slew-rate-6x");

		for (i = 0; i < 6; i++) {
			if (IS_ERR(priv->clk_drive_str[i]))
				priv->clk_drive_str[i] = NULL;
		}
	}

	of_property_read_u32(np, "samsung,dw-mshc-ciu-div", &div);
	priv->ciu_div = div;

	if (of_property_read_u32(np, "samsung,voltage-int-extra", &voltage_int_extra))
		priv->voltage_int_extra = voltage_int_extra;

	ret = of_property_read_u32_array(np, "samsung,dw-mshc-sdr-timing", timing, 4);
	if (ret)
		return ret;

	priv->sdr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], timing[2], timing[3]);

	ret = of_property_read_u32_array(np, "samsung,dw-mshc-ddr-timing", timing, 4);
	if (ret)
		return ret;

	priv->ddr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], timing[2], timing[3]);

	of_property_read_u32(np, "ignore-phase", &priv->ignore_phase);
	if (of_find_property(np, "bypass-for-allpass", NULL))
		priv->ctrl_flag |= DW_MMC_EXYNOS_BYPASS_FOR_ALL_PASS;
	if (of_find_property(np, "use-enable-shift", NULL))
		priv->ctrl_flag |= DW_MMC_EXYNOS_ENABLE_SHIFT;

	id = of_alias_get_id(host->dev->of_node, "mshc");
	switch (id) {
	/* dwmmc0 : eMMC */
	case 0:
		ret = of_property_read_u32_array(np, "samsung,dw-mshc-hs200-timing", timing, 4);
		if (ret)
			goto err_ref_clk;
		priv->hs200_timing =
			SDMMC_CLKSEL_TIMING(timing[0], timing[1], timing[2], timing[3]);

		ret = of_property_read_u32_array(np, "samsung,dw-mshc-hs400-timing", timing, 4);
		if (ret)
			goto err_ref_clk;

		priv->hs400_timing =
			SDMMC_CLKSEL_TIMING(timing[0], timing[1], timing[2], timing[3]);

		ret = of_property_read_u32_array(np, "samsung,dw-mshc-hs400-ulp-timing", timing, 4);
		if (!ret)
			priv->hs400_ulp_timing =
				SDMMC_CLKSEL_TIMING(timing[0], timing[1], timing[2], timing[3]);
		else
			ret = 0;

		/* Rx Delay Line */
		of_property_read_u32(np, "samsung,dw-mshc-hs400-delay-line", &priv->delay_line);

		/* Tx Delay Line */
		of_property_read_u32(np,
				     "samsung,dw-mshc-hs400-tx-delay-line", &priv->tx_delay_line);

		/* The fast RXCRC packet arrival time */
		of_property_read_u32(np,
				     "samsung,dw-mshc-txdt-crc-timer-fastlimit",
				     &priv->hs400_tx_t_fastlimit);

		/* Initial value of the timeout down counter for RXCRC packet */
		of_property_read_u32(np,
				     "samsung,dw-mshc-txdt-crc-timer-initval",
				     &priv->hs400_tx_t_initval);
		break;
	/* dwmmc1 : SDIO */
	case 1:
	/* dwmmc2 : SD Card */
	case 2:
		ret = of_property_read_u32_array(np, "samsung,dw-mshc-sdr50-timing", timing, 4);	/* SDR50 100 MHz */
		if (!ret)
			priv->sdr50_timing =
				SDMMC_CLKSEL_TIMING(timing[0], timing[1], timing[2], timing[3]);
		else {
			priv->sdr50_timing = priv->sdr_timing;
			ret = 0;
		}

		ret = of_property_read_u32_array(np, "samsung,dw-mshc-sdr104-timing", timing, 4);	/* SDR104 200 MHz */
		if (!ret)
			priv->sdr104_timing =
				SDMMC_CLKSEL_TIMING(timing[0], timing[1], timing[2], timing[3]);
		else {
			priv->sdr104_timing = priv->sdr_timing;
			ret = 0;
		}
		break;
	default:
		ret = -ENODEV;
	}
	host->priv = priv;
err_ref_clk:
	return ret;
}

static inline u8 dw_mci_exynos_get_clksmpl(struct dw_mci *host)
{
	return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL));
}

static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample)
{
	u32 clksel;

	clksel = mci_readl(host, CLKSEL);
	clksel = SDMMC_CLKSEL_UP_SAMPLE(clksel, sample);
	mci_writel(host, CLKSEL, clksel);
}

static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host)
{
	u32 clksel;
	u8 sample;

	clksel = mci_readl(host, CLKSEL);
	sample = (clksel + 1) & 0x7;
	clksel = (clksel & ~0x7) | sample;
	mci_writel(host, CLKSEL, clksel);
	return sample;
}

static void dw_mci_set_quirk_endbit(struct dw_mci *host, s8 mid)
{
	u32 clksel, phase;
	u32 shift;

	clksel = mci_readl(host, CLKSEL);
	phase = (((clksel >> 24) & 0x7) + 1) << 1;
	shift = 360 / phase;

	if (host->verid < DW_MMC_260A && (shift * mid) % 360 >= 225)
		host->quirks |= DW_MCI_QUIRK_NO_DETECT_EBIT;
	else
		host->quirks &= ~DW_MCI_QUIRK_NO_DETECT_EBIT;
}

static void dw_mci_exynos_set_enable_shift(struct dw_mci *host, u32 sample, bool fine_tune)
{
	u32 i, j, en_shift, en_shift_phase[3][4] = { {0, 0, 1, 0},
						     {1, 2, 3, 3},
						     {2, 4, 5, 5}
	};

	en_shift = mci_readl(host, HS400_ENABLE_SHIFT)
		   & ~(DWMCI_ENABLE_SHIFT_MASK);

	for (i = 0; i < 3; i++) {
		for (j = 1; j < 4; j++) {
			if (sample == en_shift_phase[i][j]) {
				en_shift |= DWMCI_ENABLE_SHIFT(en_shift_phase[i][0]);
				break;
			}
		}
	}
	if ((en_shift < 2) && fine_tune)
		en_shift += 1;
	mci_writel(host, HS400_ENABLE_SHIFT, en_shift);
}

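/*
 * Advance CLKSEL[2:0] to the next of the eight sample phases, skipping any
 * phase flagged in priv->ignore_phase. Phases 6 and 7 additionally appear
 * to need the alternate sample path (sample_path_sel_en()), matching what
 * dw_mci_exynos_set_sample() does for those two phases.
 */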
static u8 dw_mci_tuning_sampling(struct dw_mci *host)
{
	struct dw_mci_exynos_priv_data *priv = host->priv;
	u32 clksel, i;
	u8 sample;

	clksel = mci_readl(host, CLKSEL);
	sample = (clksel + 1) & 0x7;
	clksel = SDMMC_CLKSEL_UP_SAMPLE(clksel, sample);

	if (priv->ignore_phase) {
		for (i = 0; i < 8; i++) {
			if (priv->ignore_phase & (0x1 << sample))
				sample = (sample + 1) & 0x7;
			else
				break;
		}
	}
	clksel = (clksel & 0xfffffff8) | sample;
	mci_writel(host, CLKSEL, clksel);

	if (phase6_en & (0x1 << sample) || phase7_en & (0x1 << sample))
		sample_path_sel_en(host, AXI_BURST_LEN);
	else
		sample_path_sel_dis(host, AXI_BURST_LEN);

	if (priv->ctrl_flag & DW_MMC_EXYNOS_ENABLE_SHIFT)
		dw_mci_exynos_set_enable_shift(host, sample, false);

	return sample;
}

/* initialize the clock sample to given value */
static void dw_mci_exynos_set_sample(struct dw_mci *host, u32 sample, bool tuning)
{
	struct dw_mci_exynos_priv_data *priv = host->priv;
	u32 clksel;

	clksel = mci_readl(host, CLKSEL);
	clksel = (clksel & ~0x7) | SDMMC_CLKSEL_CCLK_SAMPLE(sample);
	mci_writel(host, CLKSEL, clksel);
	if (sample == 6 || sample == 7)
		sample_path_sel_en(host, AXI_BURST_LEN);
	else
		sample_path_sel_dis(host, AXI_BURST_LEN);

	if (priv->ctrl_flag & DW_MMC_EXYNOS_ENABLE_SHIFT)
		dw_mci_exynos_set_enable_shift(host, sample, false);
	if (!tuning)
		dw_mci_set_quirk_endbit(host, clksel);
}

static void dw_mci_set_fine_tuning_bit(struct dw_mci *host, bool is_fine_tuning)
{
	struct dw_mci_exynos_priv_data *priv = host->priv;
	u32 clksel, sample;

	clksel = mci_readl(host, CLKSEL);
	clksel = (clksel & ~BIT(6));
	sample = (clksel & 0x7);

	if (is_fine_tuning) {
		host->pdata->is_fine_tuned = true;
		clksel |= BIT(6);
	} else
		host->pdata->is_fine_tuned = false;
	mci_writel(host, CLKSEL, clksel);
	if (priv->ctrl_flag & DW_MMC_EXYNOS_ENABLE_SHIFT) {
		if (((sample % 2) == 1) && is_fine_tuning && sample != 0x7)
			dw_mci_exynos_set_enable_shift(host, sample, true);
		else
			dw_mci_exynos_set_enable_shift(host, sample, false);
	}
}

/* read current clock sample offset */
static u32 dw_mci_exynos_get_sample(struct dw_mci *host)
{
	u32 clksel = mci_readl(host, CLKSEL);

	return SDMMC_CLKSEL_CCLK_SAMPLE(clksel);
}

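/*
 * The tuning pass map uses two bits per clock phase (the coarse sample and
 * the same sample with the fine-tuning bit set), so 8 phases yield a 16-bit
 * map. The map is duplicated into the upper half of a 32-bit word below so
 * that a passing window which wraps from phase 7 back to phase 0 is still
 * seen as one contiguous run when the progressively narrower masks are slid
 * across it.
 */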
static int __find_median_of_16bits(u32 orig_bits, u16 mask, u8 startbit)
{
	u32 i, testbits;

	testbits = orig_bits;
	for (i = startbit; i < (16 + startbit); i++, testbits >>= 1)
		if ((testbits & mask) == mask)
			return SDMMC_CLKSEL_CCLK_FINE_SAMPLE(i);
	return -1;
}

#define NUM_OF_MASK 7
static int find_median_of_16bits(struct dw_mci *host, unsigned int map, bool force)
{
	struct dw_mci_exynos_priv_data *priv = host->priv;
	u32 orig_bits;
	u8 i, divratio;
	int sel = -1;
	u16 mask[NUM_OF_MASK] = { 0x1fff, 0x7ff, 0x1ff, 0x7f, 0x1f, 0xf, 0x7 };
	/* The chosen point is biased to 3/2 of each window's centre */
	int optimum[NUM_OF_MASK] = { 9, 7, 6, 5, 3, 2, 1 };

	/* replicate the map so an "arithmetic shift right" shifts in
	 * the same bits "again", i.e. a portable "rotate right" bit operation.
	 */
	if (map == 0xFFFF && force == false)
		return sel;

	divratio = (mci_readl(host, CLKSEL) >> 24) & 0x7;
	dev_info(host->dev, "divratio: %d map: 0x %08x\n", divratio, map);

	orig_bits = map | (map << 16);

	if (divratio == 1) {
		if (!(priv->ctrl_flag & DW_MMC_EXYNOS_ENABLE_SHIFT))
			orig_bits = orig_bits & (orig_bits >> 8);
	}

	for (i = 0; i < NUM_OF_MASK; i++) {
		sel = __find_median_of_16bits(orig_bits, mask[i], optimum[i]);
		if (-1 != sel)
			break;
	}

	return sel;
}

static void exynos_dwmci_tuning_drv_st(struct dw_mci *host)
{
	struct dw_mci_exynos_priv_data *priv = host->priv;

	dev_info(host->dev, "Clock GPIO Drive Strength Value: x%d\n", (priv->clk_drive_tuning));

	if (priv->pinctrl && priv->clk_drive_str[priv->clk_drive_tuning - 1])
		pinctrl_select_state(priv->pinctrl,
				     priv->clk_drive_str[priv->clk_drive_tuning - 1]);
}

/*
 * Test all 8 possible "Clock in" Sample timings.
 * Create a bitmap of which clock sample values work and find the "median"
 * value. Apply it and remember that we found the best value.
 */
static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode,
					struct dw_mci_tuning_data *tuning_data)
{
	struct dw_mci *host = slot->host;
	struct dw_mci_exynos_priv_data *priv = host->priv;
	struct mmc_host *mmc = slot->mmc;
	unsigned int tuning_loop = MAX_TUNING_LOOP;
	unsigned int drv_str_retries;
	bool tuned = 0;
	int ret = 0;
	u8 *tuning_blk;			/* data read from device */

	unsigned int sample_good = 0;	/* bit map of clock sample (0-7) */
	u32 test_sample = -1;
	u32 orig_sample;
	int best_sample = 0, best_sample_ori = 0;
	u8 pass_index;
	bool is_fine_tuning = false;
	unsigned int abnormal_result = 0xFFFF;
	unsigned int temp_ignore_phase = priv->ignore_phase;
	int ffs_ignore_phase = 0;
	u8 all_pass_count = 0;
	bool bypass = false;

	while (temp_ignore_phase) {
		ffs_ignore_phase = ffs(temp_ignore_phase) - 1;
		abnormal_result &= ~(0x3 << (2 * ffs_ignore_phase));
		temp_ignore_phase &= ~(0x1 << ffs_ignore_phase);
	}

	/* Short circuit: don't tune again if we already did. */
	if (host->pdata->tuned) {
		host->drv_data->misc_control(host, CTRL_RESTORE_CLKSEL, NULL);
		mci_writel(host, CDTHRCTL, host->cd_rd_thr << 16 | 1);
		dev_info(host->dev, "EN_SHIFT 0x %08x CLKSEL 0x %08x\n",
			 mci_readl(host, HS400_ENABLE_SHIFT), mci_readl(host, CLKSEL));
		return 0;
	}

	tuning_blk = kmalloc(2 * tuning_data->blksz, GFP_KERNEL);
	if (!tuning_blk)
		return -ENOMEM;

	test_sample = orig_sample = dw_mci_exynos_get_sample(host);
	host->cd_rd_thr = 512;
	mci_writel(host, CDTHRCTL, host->cd_rd_thr << 16 | 1);

	/*
	 * eMMC 4.5 spec section 6.6.7.1 says the device is guaranteed to
	 * complete 40 iterations of CMD21 in 150ms. So this shouldn't take
	 * longer than about 30ms or so....at least assuming most values
	 * work and don't time out.
	 */

	if (host->pdata->io_mode == MMC_TIMING_MMC_HS400)
		host->quirks |= DW_MCI_QUIRK_NO_DETECT_EBIT;

	dev_info(host->dev, "Tuning Abnormal_result 0x%08x.\n", abnormal_result);

	priv->clk_drive_tuning = priv->clk_drive_number;
	drv_str_retries = priv->clk_drive_number;

	do {
		struct mmc_request mrq;
		struct mmc_command cmd;
		struct mmc_command stop;
		struct mmc_data data;
		struct scatterlist sg;

		if (!tuning_loop)
			break;

		memset(&cmd, 0, sizeof(cmd));
		cmd.opcode = opcode;
		cmd.arg = 0;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
		cmd.error = 0;
		cmd.busy_timeout = 10;	/* 2x * (150ms/40 + setup overhead) */

		memset(&stop, 0, sizeof(stop));
		stop.opcode = MMC_STOP_TRANSMISSION;
		stop.arg = 0;
		stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
		stop.error = 0;

		memset(&data, 0, sizeof(data));
		data.blksz = tuning_data->blksz;
		data.blocks = 1;
		data.flags = MMC_DATA_READ;
		data.sg = &sg;
		data.sg_len = 1;
		data.error = 0;

		memset(tuning_blk, ~0U, tuning_data->blksz);
		sg_init_one(&sg, tuning_blk, tuning_data->blksz);

		memset(&mrq, 0, sizeof(mrq));
		mrq.cmd = &cmd;
		mrq.stop = &stop;
		mrq.data = &data;
		host->mrq = &mrq;

		/*
		 * DDR200 tuning Sequence with fine tuning setup
		 *
		 * 0. phase 0 (0 degree) + no fine tuning setup
		 *    - pass_index = 0
		 * 1. phase 0 + fine tuning setup
		 *    - pass_index = 1
		 * 2. phase 1 (90 degree) + no fine tuning setup
		 *    - pass_index = 2
		 * ..
		 * 15. phase 7 + fine tuning setup
		 *     - pass_index = 15
		 *
		 */
		dw_mci_set_fine_tuning_bit(host, is_fine_tuning);

		dw_mci_set_timeout(host, dw_mci_calc_timeout(host));
		mmc_wait_for_req(mmc, &mrq);

		pass_index = (u8) test_sample * 2;

		if (is_fine_tuning)
			pass_index++;

		if (!cmd.error && !data.error) {
			/*
			 * Verify the "tuning block" arrived (to host) intact.
			 * If yes, remember this sample value works.
			 */
			if (host->use_dma == 1) {
				sample_good |= (1 << pass_index);
			} else {
				if (!memcmp
				    (tuning_data->blk_pattern, tuning_blk, tuning_data->blksz))
					sample_good |= (1 << pass_index);
			}
		} else {
			dev_info(&mmc->class_dev,
				 "Tuning error: cmd.error:%d, data.error:%d CLKSEL = 0x%08x, EN_SHIFT = 0x%08x\n",
				 cmd.error, data.error,
				 mci_readl(host, CLKSEL), mci_readl(host, HS400_ENABLE_SHIFT));
		}

		if (is_fine_tuning)
			test_sample = dw_mci_tuning_sampling(host);

		is_fine_tuning = !is_fine_tuning;

		if (orig_sample == test_sample && !is_fine_tuning) {

			/*
			 * Get at middle clock sample values.
			 */
			if (sample_good == abnormal_result)
				all_pass_count++;

			if (priv->ctrl_flag & DW_MMC_EXYNOS_BYPASS_FOR_ALL_PASS)
				bypass = (all_pass_count > priv->clk_drive_number) ? true : false;

			if (bypass) {
				dev_info(host->dev, "Bypassed for all pass at %d times\n",
					 priv->clk_drive_number);
				sample_good = abnormal_result & 0xFFFF;
				tuned = true;
			}

			best_sample = find_median_of_16bits(host, sample_good, bypass);

			if (best_sample >= 0) {
				dev_info(host->dev, "sample_good: 0x%02x best_sample: 0x%02x\n",
					 sample_good, best_sample);

				if (sample_good != abnormal_result || bypass) {
					tuned = true;
					break;
				}
			} else
				dev_info(host->dev,
					 "Failed to find median value in sample good (0x%02x)\n",
					 sample_good);

			if (drv_str_retries) {
				drv_str_retries--;
				if (priv->clk_drive_str[0]) {
					exynos_dwmci_tuning_drv_st(host);
					if (priv->clk_drive_tuning > 0)
						priv->clk_drive_tuning--;
				}
				sample_good = 0;
			} else
				break;
		}
		tuning_loop--;
	} while (!tuned);

	/*
	 * To turn the chosen index back into a sample value it is divided by
	 * 2, because the index refers to the extended (8 -> 16 bit) pass map.
	 * An odd index means the selected case also uses fine tuning.
	 */

	best_sample_ori = best_sample;
	best_sample /= 2;

	if (host->pdata->io_mode == MMC_TIMING_MMC_HS400)
		host->quirks &= ~DW_MCI_QUIRK_NO_DETECT_EBIT;

	if (tuned) {
		host->pdata->clk_smpl = priv->tuned_sample = best_sample;
		if (host->pdata->only_once_tune)
			host->pdata->tuned = true;

		if (best_sample_ori % 2)
			best_sample += 1;

		dw_mci_exynos_set_sample(host, best_sample, false);
		dw_mci_set_fine_tuning_bit(host, false);
	} else {
		/* Failed. Just restore and return error */
		dev_err(host->dev, "tuning err\n");
		mci_writel(host, CDTHRCTL, 0 << 16 | 0);
		dw_mci_exynos_set_sample(host, orig_sample, false);
		ret = -EIO;
	}

	/* Roll back the clock drive strength */
	if (priv->pinctrl && priv->clk_drive_base)
		pinctrl_select_state(priv->pinctrl, priv->clk_drive_base);

	dev_info(host->dev, "CLKSEL = 0x%08x, EN_SHIFT = 0x%08x\n",
		 mci_readl(host, CLKSEL), mci_readl(host, HS400_ENABLE_SHIFT));

	kfree(tuning_blk);
	return ret;
}

static int dw_mci_exynos_request_ext_irq(struct dw_mci *host, irq_handler_t func)
{
	struct dw_mci_exynos_priv_data *priv = host->priv;
	int ext_cd_irq = 0;

	if (gpio_is_valid(priv->cd_gpio) && !gpio_request(priv->cd_gpio, "DWMCI_EXT_CD")) {
		ext_cd_irq = gpio_to_irq(priv->cd_gpio);
		if (ext_cd_irq &&
		    devm_request_irq(host->dev, ext_cd_irq, func,
				     IRQF_TRIGGER_RISING |
				     IRQF_TRIGGER_FALLING |
				     IRQF_ONESHOT, "tflash_det", host) == 0) {
			dev_info(host->dev, "successfully requested irq for card detect.\n");
			enable_irq_wake(ext_cd_irq);
		} else
			dev_info(host->dev, "cannot request irq for card detect.\n");
	}

	return 0;
}

static int dw_mci_exynos_check_cd(struct dw_mci *host)
{
	int ret = -1;
	struct dw_mci_exynos_priv_data *priv = host->priv;

	if (gpio_is_valid(priv->cd_gpio)) {
		if (host->pdata->use_gpio_invert)
			ret = gpio_get_value(priv->cd_gpio) ? 1 : 0;
		else
			ret = gpio_get_value(priv->cd_gpio) ? 0 : 1;
	}
	return ret;
}

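/*
 * The caps array is indexed by controller instance (the "mshc" alias id in
 * the DT), so presumably only the first controller, the eMMC one, advertises
 * 8-bit DDR on top of the common MMC_CAP_CMD23.
 */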
/* Common capabilities of Exynos4/Exynos5 SoC */
static unsigned long exynos_dwmmc_caps[4] = {
	MMC_CAP_1_8V_DDR | MMC_CAP_8_BIT_DATA | MMC_CAP_CMD23,
	MMC_CAP_CMD23,
	MMC_CAP_CMD23,
	MMC_CAP_CMD23,
};

static int dw_mci_exynos_misc_control(struct dw_mci *host,
				      enum dw_mci_misc_control control, void *priv)
{
	int ret = 0;

	switch (control) {
	case CTRL_RESTORE_CLKSEL:
		dw_mci_exynos_set_sample(host, host->pdata->clk_smpl, false);
		dw_mci_set_fine_tuning_bit(host, host->pdata->is_fine_tuned);
		break;
	case CTRL_REQUEST_EXT_IRQ:
		ret = dw_mci_exynos_request_ext_irq(host, (irq_handler_t) priv);
		break;
	case CTRL_CHECK_CD:
		ret = dw_mci_exynos_check_cd(host);
		break;
	default:
		dev_err(host->dev, "dw_mmc exynos: wrong case\n");
		ret = -ENODEV;
	}
	return ret;
}

#ifdef CONFIG_MMC_DW_EXYNOS_FMP
static int dw_mci_exynos_crypto_engine_cfg(struct dw_mci *host,
					   void *desc,
					   struct mmc_data *data,
					   struct page *page, int sector_offset, bool cmdq_enabled)
{
	return exynos_mmc_fmp_cfg(host, desc, data, page, sector_offset, cmdq_enabled);
}

static int dw_mci_exynos_crypto_engine_clear(struct dw_mci *host, void *desc, bool cmdq_enabled)
{
	return exynos_mmc_fmp_clear(host, desc, cmdq_enabled);
}

static int dw_mci_exynos_access_control_get_dev(struct dw_mci *host)
{
	return exynos_mmc_smu_get_dev(host);
}

static int dw_mci_exynos_access_control_sec_cfg(struct dw_mci *host)
{
	return exynos_mmc_smu_sec_cfg(host);
}

static int dw_mci_exynos_access_control_init(struct dw_mci *host)
{
	return exynos_mmc_smu_init(host);
}

static int dw_mci_exynos_access_control_abort(struct dw_mci *host)
{
	return exynos_mmc_smu_abort(host);
}

static int dw_mci_exynos_access_control_resume(struct dw_mci *host)
{
	return exynos_mmc_smu_resume(host);
}
#endif

static const struct dw_mci_drv_data exynos_drv_data = {
	.caps = exynos_dwmmc_caps,
	.num_caps = ARRAY_SIZE(exynos_dwmmc_caps),
	.init = dw_mci_exynos_priv_init,
	.set_ios = dw_mci_exynos_set_ios,
	.parse_dt = dw_mci_exynos_parse_dt,
	.execute_tuning = dw_mci_exynos_execute_tuning,
	.hwacg_control = dw_mci_card_int_hwacg_ctrl,
	.misc_control = dw_mci_exynos_misc_control,
#ifdef CONFIG_MMC_DW_EXYNOS_FMP
	.crypto_engine_cfg = dw_mci_exynos_crypto_engine_cfg,
	.crypto_engine_clear = dw_mci_exynos_crypto_engine_clear,
	.access_control_get_dev = dw_mci_exynos_access_control_get_dev,
	.access_control_sec_cfg = dw_mci_exynos_access_control_sec_cfg,
	.access_control_init = dw_mci_exynos_access_control_init,
	.access_control_abort = dw_mci_exynos_access_control_abort,
	.access_control_resume = dw_mci_exynos_access_control_resume,
#endif

	.ssclk_control = dw_mci_ssclk_control,
};

static const struct of_device_id dw_mci_exynos_match[] = {
	{
		.compatible = "samsung,exynos-dw-mshc",
		.data = &exynos_drv_data,
	},
	{},
};

MODULE_DEVICE_TABLE(of, dw_mci_exynos_match);

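/*
 * Probe enables runtime PM (and marks the device active) before handing
 * over to dw_mci_pltfm_register(); the error path and remove() undo this
 * in reverse order.
 */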
static int dw_mci_exynos_probe(struct platform_device *pdev)
{
	const struct dw_mci_drv_data *drv_data;
	const struct of_device_id *match;
	int ret;

	match = of_match_node(dw_mci_exynos_match, pdev->dev.of_node);
	drv_data = match->data;

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = dw_mci_pltfm_register(pdev, drv_data);
	if (ret) {
		pm_runtime_disable(&pdev->dev);
		pm_runtime_set_suspended(&pdev->dev);
		pm_runtime_put_noidle(&pdev->dev);

		return ret;
	}

	return 0;
}

static int dw_mci_exynos_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	return dw_mci_pltfm_remove(pdev);
}

static const struct dev_pm_ops dw_mci_exynos_pmops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend,
			   dw_mci_exynos_runtime_resume,
			   NULL)
	.resume_noirq = dw_mci_exynos_resume_noirq,
	.thaw_noirq = dw_mci_exynos_resume_noirq,
	.restore_noirq = dw_mci_exynos_resume_noirq,
};

static struct platform_driver dw_mci_exynos_pltfm_driver = {
	.probe = dw_mci_exynos_probe,
	.remove = dw_mci_exynos_remove,
	.driver = {
		.name = "dwmmc_exynos",
		.of_match_table = dw_mci_exynos_match,
		.pm = &dw_mci_exynos_pmops,
	},
};

module_platform_driver(dw_mci_exynos_pltfm_driver);

MODULE_DESCRIPTION("Samsung Specific DW-MSHC Driver Extension");
MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dwmmc_exynos");