/*
 * Exynos Specific Extensions for Synopsys DW Multimedia Card Interface driver
 *
 * Copyright (C) 2012, Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
12 #include <linux/module.h>
13 #include <linux/platform_device.h>
14 #include <linux/clk.h>
15 #include <linux/mmc/host.h>
16 #include <linux/mmc/mmc.h>
18 #include <linux/of_gpio.h>
19 #include <linux/pm_runtime.h>
20 #include <linux/slab.h>
21 #include <linux/pinctrl/pinctrl.h>
22 #include <linux/pinctrl/pinconf.h>
23 #include <linux/smc.h>
26 #include "dw_mmc-pltfm.h"
27 #include "dw_mmc-exynos.h"
/*
 * PLL spread-spectrum-clocking helpers provided by the CAL (chip abstraction
 * layer) code outside this driver.
 * NOTE(review): externs in a .c file — these belong in a shared header.
 */
extern int cal_pll_mmc_set_ssc(unsigned int mfr, unsigned int mrr, unsigned int ssc_on);
extern int cal_pll_mmc_check(void);
32 static void dw_mci_exynos_register_dump(struct dw_mci
*host
)
34 dev_err(host
->dev
, ": EMMCP_BASE: 0x%08x\n",
35 host
->sfr_dump
->fmp_emmcp_base
= mci_readl(host
, EMMCP_BASE
));
36 dev_err(host
->dev
, ": MPSECURITY: 0x%08x\n",
37 host
->sfr_dump
->mpsecurity
= mci_readl(host
, MPSECURITY
));
38 dev_err(host
->dev
, ": MPSTAT: 0x%08x\n",
39 host
->sfr_dump
->mpstat
= mci_readl(host
, MPSTAT
));
40 dev_err(host
->dev
, ": MPSBEGIN: 0x%08x\n",
41 host
->sfr_dump
->mpsbegin
= mci_readl(host
, MPSBEGIN0
));
42 dev_err(host
->dev
, ": MPSEND: 0x%08x\n",
43 host
->sfr_dump
->mpsend
= mci_readl(host
, MPSEND0
));
44 dev_err(host
->dev
, ": MPSCTRL: 0x%08x\n",
45 host
->sfr_dump
->mpsctrl
= mci_readl(host
, MPSCTRL0
));
46 dev_err(host
->dev
, ": HS400_DQS_EN: 0x%08x\n",
47 host
->sfr_dump
->hs400_rdqs_en
= mci_readl(host
, HS400_DQS_EN
));
48 dev_err(host
->dev
, ": HS400_ASYNC_FIFO_CTRL: 0x%08x\n",
49 host
->sfr_dump
->hs400_acync_fifo_ctrl
= mci_readl(host
, HS400_ASYNC_FIFO_CTRL
));
50 dev_err(host
->dev
, ": HS400_DLINE_CTRL: 0x%08x\n",
51 host
->sfr_dump
->hs400_dline_ctrl
= mci_readl(host
, HS400_DLINE_CTRL
));
54 void dw_mci_reg_dump(struct dw_mci
*host
)
58 dev_err(host
->dev
, ": ============== REGISTER DUMP ==============\n");
59 dev_err(host
->dev
, ": CTRL: 0x%08x\n", host
->sfr_dump
->contrl
= mci_readl(host
, CTRL
));
60 dev_err(host
->dev
, ": PWREN: 0x%08x\n", host
->sfr_dump
->pwren
= mci_readl(host
, PWREN
));
61 dev_err(host
->dev
, ": CLKDIV: 0x%08x\n",
62 host
->sfr_dump
->clkdiv
= mci_readl(host
, CLKDIV
));
63 dev_err(host
->dev
, ": CLKSRC: 0x%08x\n",
64 host
->sfr_dump
->clksrc
= mci_readl(host
, CLKSRC
));
65 dev_err(host
->dev
, ": CLKENA: 0x%08x\n",
66 host
->sfr_dump
->clkena
= mci_readl(host
, CLKENA
));
67 dev_err(host
->dev
, ": TMOUT: 0x%08x\n", host
->sfr_dump
->tmout
= mci_readl(host
, TMOUT
));
68 dev_err(host
->dev
, ": CTYPE: 0x%08x\n", host
->sfr_dump
->ctype
= mci_readl(host
, CTYPE
));
69 dev_err(host
->dev
, ": BLKSIZ: 0x%08x\n",
70 host
->sfr_dump
->blksiz
= mci_readl(host
, BLKSIZ
));
71 dev_err(host
->dev
, ": BYTCNT: 0x%08x\n",
72 host
->sfr_dump
->bytcnt
= mci_readl(host
, BYTCNT
));
73 dev_err(host
->dev
, ": INTMSK: 0x%08x\n",
74 host
->sfr_dump
->intmask
= mci_readl(host
, INTMASK
));
75 dev_err(host
->dev
, ": CMDARG: 0x%08x\n",
76 host
->sfr_dump
->cmdarg
= mci_readl(host
, CMDARG
));
77 dev_err(host
->dev
, ": CMD: 0x%08x\n", host
->sfr_dump
->cmd
= mci_readl(host
, CMD
));
78 dev_err(host
->dev
, ": RESP0: 0x%08x\n", mci_readl(host
, RESP0
));
79 dev_err(host
->dev
, ": RESP1: 0x%08x\n", mci_readl(host
, RESP1
));
80 dev_err(host
->dev
, ": RESP2: 0x%08x\n", mci_readl(host
, RESP2
));
81 dev_err(host
->dev
, ": RESP3: 0x%08x\n", mci_readl(host
, RESP3
));
82 dev_err(host
->dev
, ": MINTSTS: 0x%08x\n",
83 host
->sfr_dump
->mintsts
= mci_readl(host
, MINTSTS
));
84 dev_err(host
->dev
, ": RINTSTS: 0x%08x\n",
85 host
->sfr_dump
->rintsts
= mci_readl(host
, RINTSTS
));
86 dev_err(host
->dev
, ": STATUS: 0x%08x\n",
87 host
->sfr_dump
->status
= mci_readl(host
, STATUS
));
88 dev_err(host
->dev
, ": FIFOTH: 0x%08x\n",
89 host
->sfr_dump
->fifoth
= mci_readl(host
, FIFOTH
));
90 dev_err(host
->dev
, ": CDETECT: 0x%08x\n", mci_readl(host
, CDETECT
));
91 dev_err(host
->dev
, ": WRTPRT: 0x%08x\n", mci_readl(host
, WRTPRT
));
92 dev_err(host
->dev
, ": GPIO: 0x%08x\n", mci_readl(host
, GPIO
));
93 dev_err(host
->dev
, ": TCBCNT: 0x%08x\n",
94 host
->sfr_dump
->tcbcnt
= mci_readl(host
, TCBCNT
));
95 dev_err(host
->dev
, ": TBBCNT: 0x%08x\n",
96 host
->sfr_dump
->tbbcnt
= mci_readl(host
, TBBCNT
));
97 dev_err(host
->dev
, ": DEBNCE: 0x%08x\n", mci_readl(host
, DEBNCE
));
98 dev_err(host
->dev
, ": USRID: 0x%08x\n", mci_readl(host
, USRID
));
99 dev_err(host
->dev
, ": VERID: 0x%08x\n", mci_readl(host
, VERID
));
100 dev_err(host
->dev
, ": HCON: 0x%08x\n", mci_readl(host
, HCON
));
101 dev_err(host
->dev
, ": UHS_REG: 0x%08x\n",
102 host
->sfr_dump
->uhs_reg
= mci_readl(host
, UHS_REG
));
103 dev_err(host
->dev
, ": BMOD: 0x%08x\n", host
->sfr_dump
->bmod
= mci_readl(host
, BMOD
));
104 dev_err(host
->dev
, ": PLDMND: 0x%08x\n", mci_readl(host
, PLDMND
));
105 dev_err(host
->dev
, ": DBADDRL: 0x%08x\n",
106 host
->sfr_dump
->dbaddrl
= mci_readl(host
, DBADDRL
));
107 dev_err(host
->dev
, ": DBADDRU: 0x%08x\n",
108 host
->sfr_dump
->dbaddru
= mci_readl(host
, DBADDRU
));
109 dev_err(host
->dev
, ": DSCADDRL: 0x%08x\n",
110 host
->sfr_dump
->dscaddrl
= mci_readl(host
, DSCADDRL
));
111 dev_err(host
->dev
, ": DSCADDRU: 0x%08x\n",
112 host
->sfr_dump
->dscaddru
= mci_readl(host
, DSCADDRU
));
113 dev_err(host
->dev
, ": BUFADDR: 0x%08x\n",
114 host
->sfr_dump
->bufaddr
= mci_readl(host
, BUFADDR
));
115 dev_err(host
->dev
, ": BUFADDRU: 0x%08x\n",
116 host
->sfr_dump
->bufaddru
= mci_readl(host
, BUFADDRU
));
117 dev_err(host
->dev
, ": DBADDR: 0x%08x\n",
118 host
->sfr_dump
->dbaddr
= mci_readl(host
, DBADDR
));
119 dev_err(host
->dev
, ": DSCADDR: 0x%08x\n",
120 host
->sfr_dump
->dscaddr
= mci_readl(host
, DSCADDR
));
121 dev_err(host
->dev
, ": BUFADDR: 0x%08x\n",
122 host
->sfr_dump
->bufaddr
= mci_readl(host
, BUFADDR
));
123 dev_err(host
->dev
, ": CLKSEL: 0x%08x\n",
124 host
->sfr_dump
->clksel
= mci_readl(host
, CLKSEL
));
125 dev_err(host
->dev
, ": IDSTS: 0x%08x\n", mci_readl(host
, IDSTS
));
126 dev_err(host
->dev
, ": IDSTS64: 0x%08x\n",
127 host
->sfr_dump
->idsts64
= mci_readl(host
, IDSTS64
));
128 dev_err(host
->dev
, ": IDINTEN: 0x%08x\n", mci_readl(host
, IDINTEN
));
129 dev_err(host
->dev
, ": IDINTEN64: 0x%08x\n",
130 host
->sfr_dump
->idinten64
= mci_readl(host
, IDINTEN64
));
131 dev_err(host
->dev
, ": RESP_TAT: 0x%08x\n", mci_readl(host
, RESP_TAT
));
132 dev_err(host
->dev
, ": FORCE_CLK_STOP: 0x%08x\n",
133 host
->sfr_dump
->force_clk_stop
= mci_readl(host
, FORCE_CLK_STOP
));
134 dev_err(host
->dev
, ": CDTHRCTL: 0x%08x\n", mci_readl(host
, CDTHRCTL
));
135 dw_mci_exynos_register_dump(host
);
136 dev_err(host
->dev
, ": ============== STATUS DUMP ================\n");
137 dev_err(host
->dev
, ": cmd_status: 0x%08x\n",
138 host
->sfr_dump
->cmd_status
= host
->cmd_status
);
139 dev_err(host
->dev
, ": data_status: 0x%08x\n",
140 host
->sfr_dump
->force_clk_stop
= host
->data_status
);
141 dev_err(host
->dev
, ": pending_events: 0x%08lx\n",
142 host
->sfr_dump
->pending_events
= host
->pending_events
);
143 dev_err(host
->dev
, ": completed_events:0x%08lx\n",
144 host
->sfr_dump
->completed_events
= host
->completed_events
);
145 dev_err(host
->dev
, ": state: %d\n", host
->sfr_dump
->host_state
= host
->state
);
146 dev_err(host
->dev
, ": gate-clk: %s\n",
147 atomic_read(&host
->ciu_clk_cnt
) ? "enable" : "disable");
148 dev_err(host
->dev
, ": ciu_en_win: %d\n", atomic_read(&host
->ciu_en_win
));
149 reg
= mci_readl(host
, CMD
);
150 dev_err(host
->dev
, ": ================= CMD REG =================\n");
151 if ((reg
>> 9) & 0x1) {
152 dev_err(host
->dev
, ": read/write : %s\n",
153 (reg
& (0x1 << 10)) ? "write" : "read");
154 dev_err(host
->dev
, ": data expected : %d\n", (reg
>> 9) & 0x1);
156 dev_err(host
->dev
, ": cmd index : %d\n",
157 host
->sfr_dump
->cmd_index
= ((reg
>> 0) & 0x3f));
158 reg
= mci_readl(host
, STATUS
);
159 dev_err(host
->dev
, ": ================ STATUS REG ===============\n");
160 dev_err(host
->dev
, ": fifocount : %d\n",
161 host
->sfr_dump
->fifo_count
= ((reg
>> 17) & 0x1fff));
162 dev_err(host
->dev
, ": response index : %d\n", (reg
>> 11) & 0x3f);
163 dev_err(host
->dev
, ": data state mc busy: %d\n", (reg
>> 10) & 0x1);
164 dev_err(host
->dev
, ": data busy : %d\n",
165 host
->sfr_dump
->data_busy
= ((reg
>> 9) & 0x1));
166 dev_err(host
->dev
, ": data 3 state : %d\n",
167 host
->sfr_dump
->data_3_state
= ((reg
>> 8) & 0x1));
168 dev_err(host
->dev
, ": command fsm state : %d\n", (reg
>> 4) & 0xf);
169 dev_err(host
->dev
, ": fifo full : %d\n", (reg
>> 3) & 0x1);
170 dev_err(host
->dev
, ": fifo empty : %d\n", (reg
>> 2) & 0x1);
171 dev_err(host
->dev
, ": fifo tx watermark : %d\n",
172 host
->sfr_dump
->fifo_tx_watermark
= ((reg
>> 1) & 0x1));
173 dev_err(host
->dev
, ": fifo rx watermark : %d\n",
174 host
->sfr_dump
->fifo_rx_watermark
= ((reg
>> 0) & 0x1));
175 dev_err(host
->dev
, ": ===========================================\n");
/* Variations in Exynos specific dw-mshc controller */
enum dw_mci_exynos_type {
	/* NOTE(review): enum body lost in extraction — only this value is referenced below. */
	DW_MCI_TYPE_EXYNOS,
};

static struct dw_mci_exynos_compatible {
	char *compatible;	/* DT compatible string to match */
	enum dw_mci_exynos_type ctrl_type;
} exynos_compat[] = {
	{
	 .compatible = "samsung,exynos-dw-mshc",
	 .ctrl_type = DW_MCI_TYPE_EXYNOS,
	 },
};
190 static inline u8
dw_mci_exynos_get_ciu_div(struct dw_mci
*host
)
192 return SDMMC_CLKSEL_GET_DIV(mci_readl(host
, CLKSEL
)) + 1;
195 static int dw_mci_exynos_priv_init(struct dw_mci
*host
)
197 struct dw_mci_exynos_priv_data
*priv
= host
->priv
;
199 priv
->saved_strobe_ctrl
= mci_readl(host
, HS400_DLINE_CTRL
);
200 priv
->saved_dqs_en
= mci_readl(host
, HS400_DQS_EN
);
201 priv
->saved_dqs_en
|= AXI_NON_BLOCKING_WR
;
202 mci_writel(host
, HS400_DQS_EN
, priv
->saved_dqs_en
);
203 if (!priv
->dqs_delay
)
204 priv
->dqs_delay
= DQS_CTRL_GET_RD_DELAY(priv
->saved_strobe_ctrl
);
205 #if defined(CONFIG_MMC_DW_64BIT_DESC)
206 if (priv
->voltage_int_extra
!= 0) {
209 reg
= mci_readl(host
, AXI_BURST_LEN
);
211 reg
|= (priv
->voltage_int_extra
<< 24);
212 mci_writel(host
, AXI_BURST_LEN
, reg
);
218 static void dw_mci_ssclk_control(struct dw_mci
*host
, int enable
)
221 if (host
->pdata
->quirks
& DW_MCI_QUIRK_USE_SSC
) {
224 if (enable
&& cal_pll_mmc_check() == false) {
225 if (host
->pdata
->ssc_rate
> 8) {
226 dev_info(host
->dev
, "unvalid SSC rate value.\n");
228 err
= cal_pll_mmc_set_ssc(12, host
->pdata
->ssc_rate
, 1);
230 dev_info(host
->dev
, "SSC set fail.\n");
232 dev_info(host
->dev
, "SSC set enable.\n");
234 } else if (!enable
&& cal_pll_mmc_check() == true) {
235 err
= cal_pll_mmc_set_ssc(0, 0, 0);
237 dev_info(host
->dev
, "SSC set fail.\n");
239 dev_info(host
->dev
, "SSC set disable.\n");
245 static void dw_mci_exynos_set_clksel_timing(struct dw_mci
*host
, u32 timing
)
249 clksel
= mci_readl(host
, CLKSEL
);
250 clksel
= (clksel
& ~SDMMC_CLKSEL_TIMING_MASK
) | timing
;
252 if (!((host
->pdata
->io_mode
== MMC_TIMING_MMC_HS400
) ||
253 (host
->pdata
->io_mode
== MMC_TIMING_MMC_HS400_ES
)))
254 clksel
&= ~(BIT(30) | BIT(19));
256 mci_writel(host
, CLKSEL
, clksel
);
260 static int dw_mci_exynos_runtime_resume(struct device
*dev
)
262 return dw_mci_runtime_resume(dev
);
266 * dw_mci_exynos_resume_noirq - Exynos-specific resume code
268 * On exynos5420 there is a silicon errata that will sometimes leave the
269 * WAKEUP_INT bit in the CLKSEL register asserted. This bit is 1 to indicate
270 * that it fired and we can clear it by writing a 1 back. Clear it to prevent
271 * interrupts from going off constantly.
273 * We run this code on all exynos variants because it doesn't hurt.
276 static int dw_mci_exynos_resume_noirq(struct device
*dev
)
278 struct dw_mci
*host
= dev_get_drvdata(dev
);
281 clksel
= mci_readl(host
, CLKSEL
);
283 if (clksel
& SDMMC_CLKSEL_WAKEUP_INT
)
284 mci_writel(host
, CLKSEL
, clksel
);
289 #define dw_mci_exynos_resume_noirq NULL
290 #endif /* CONFIG_PM */
292 static void dw_mci_card_int_hwacg_ctrl(struct dw_mci
*host
, u32 flag
)
296 reg
= mci_readl(host
, FORCE_CLK_STOP
);
297 if (flag
== HWACG_Q_ACTIVE_EN
) {
298 reg
|= MMC_HWACG_CONTROL
;
299 host
->qactive_check
= HWACG_Q_ACTIVE_EN
;
301 reg
&= ~(MMC_HWACG_CONTROL
);
302 host
->qactive_check
= HWACG_Q_ACTIVE_DIS
;
304 mci_writel(host
, FORCE_CLK_STOP
, reg
);
307 static void dw_mci_exynos_config_hs400(struct dw_mci
*host
, u32 timing
)
309 struct dw_mci_exynos_priv_data
*priv
= host
->priv
;
313 * Not supported to configure register
317 dqs
= priv
->saved_dqs_en
;
318 strobe
= priv
->saved_strobe_ctrl
;
320 if (timing
== MMC_TIMING_MMC_HS400
|| timing
== MMC_TIMING_MMC_HS400_ES
) {
321 dqs
&= ~(DWMCI_TXDT_CRC_TIMER_SET(0xFF, 0xFF));
322 dqs
|= (DWMCI_TXDT_CRC_TIMER_SET(priv
->hs400_tx_t_fastlimit
,
323 priv
->hs400_tx_t_initval
) | DWMCI_RDDQS_EN
|
324 DWMCI_AXI_NON_BLOCKING_WRITE
);
325 if (host
->pdata
->quirks
& DW_MCI_QUIRK_ENABLE_ULP
) {
326 if (priv
->delay_line
|| priv
->tx_delay_line
)
327 strobe
= DWMCI_WD_DQS_DELAY_CTRL(priv
->tx_delay_line
) |
328 DWMCI_FIFO_CLK_DELAY_CTRL(0x2) |
329 DWMCI_RD_DQS_DELAY_CTRL(priv
->delay_line
);
331 strobe
= DWMCI_FIFO_CLK_DELAY_CTRL(0x2) |
332 DWMCI_RD_DQS_DELAY_CTRL(90);
334 if (priv
->delay_line
)
335 strobe
= DWMCI_FIFO_CLK_DELAY_CTRL(0x2) |
336 DWMCI_RD_DQS_DELAY_CTRL(priv
->delay_line
);
338 strobe
= DWMCI_FIFO_CLK_DELAY_CTRL(0x2) |
339 DWMCI_RD_DQS_DELAY_CTRL(90);
341 dqs
|= (DATA_STROBE_EN
| DWMCI_AXI_NON_BLOCKING_WRITE
);
342 if (timing
== MMC_TIMING_MMC_HS400_ES
)
343 dqs
|= DWMCI_RESP_RCLK_MODE
;
345 dqs
&= ~DATA_STROBE_EN
;
348 mci_writel(host
, HS400_DQS_EN
, dqs
);
349 mci_writel(host
, HS400_DLINE_CTRL
, strobe
);
352 static void dw_mci_exynos_adjust_clock(struct dw_mci
*host
, unsigned int wanted
)
354 struct dw_mci_exynos_priv_data
*priv
= host
->priv
;
355 unsigned long actual
;
361 * Don't care if wanted clock is zero or
362 * ciu clock is unavailable
364 if (!wanted
|| IS_ERR(host
->ciu_clk
))
367 /* Guaranteed minimum frequency for cclkin */
368 if (wanted
< EXYNOS_CCLKIN_MIN
)
369 wanted
= EXYNOS_CCLKIN_MIN
;
371 div
= dw_mci_exynos_get_ciu_div(host
);
373 if (wanted
== priv
->cur_speed
) {
374 clock
= clk_get_rate(host
->ciu_clk
);
375 if (clock
== priv
->cur_speed
* div
)
379 ret
= clk_set_rate(host
->ciu_clk
, wanted
* div
);
381 dev_warn(host
->dev
, "failed to set clk-rate %u error: %d\n", wanted
* div
, ret
);
382 actual
= clk_get_rate(host
->ciu_clk
);
383 host
->bus_hz
= actual
/ div
;
384 priv
->cur_speed
= wanted
;
385 host
->current_speed
= 0;
388 static void dw_mci_exynos_set_ios(struct dw_mci
*host
, struct mmc_ios
*ios
)
390 struct dw_mci_exynos_priv_data
*priv
= host
->priv
;
391 unsigned int wanted
= ios
->clock
;
392 u32
*clk_tbl
= priv
->ref_clk
;
393 u32 timing
= ios
->timing
, clksel
;
396 cclkin
= clk_tbl
[timing
];
397 host
->pdata
->io_mode
= timing
;
398 if (host
->bus_hz
!= cclkin
)
402 case MMC_TIMING_MMC_HS400
:
403 case MMC_TIMING_MMC_HS400_ES
:
404 if (host
->pdata
->quirks
& DW_MCI_QUIRK_ENABLE_ULP
) {
405 clksel
= SDMMC_CLKSEL_UP_SAMPLE(priv
->hs400_ulp_timing
, priv
->tuned_sample
);
406 clksel
|= (BIT(30) | BIT(19)); /* ultra low powermode on */
408 clksel
= SDMMC_CLKSEL_UP_SAMPLE(priv
->hs400_timing
, priv
->tuned_sample
);
409 clksel
&= ~(BIT(30) | BIT(19)); /* ultra low powermode on */
412 if (host
->pdata
->is_fine_tuned
)
415 case MMC_TIMING_MMC_DDR52
:
416 case MMC_TIMING_UHS_DDR50
:
417 clksel
= priv
->ddr_timing
;
418 /* Should be double rate for DDR mode */
419 if (ios
->bus_width
== MMC_BUS_WIDTH_8
)
422 case MMC_TIMING_MMC_HS200
:
423 clksel
= SDMMC_CLKSEL_UP_SAMPLE(priv
->hs200_timing
, priv
->tuned_sample
);
425 case MMC_TIMING_UHS_SDR104
:
426 if (priv
->sdr104_timing
)
427 clksel
= SDMMC_CLKSEL_UP_SAMPLE(priv
->sdr104_timing
, priv
->tuned_sample
);
429 dev_info(host
->dev
, "Setting of SDR104 timing in not been!!\n");
430 clksel
= SDMMC_CLKSEL_UP_SAMPLE(priv
->sdr_timing
, priv
->tuned_sample
);
432 dw_mci_ssclk_control(host
, 1);
434 case MMC_TIMING_UHS_SDR50
:
435 if (priv
->sdr50_timing
)
436 clksel
= SDMMC_CLKSEL_UP_SAMPLE(priv
->sdr50_timing
, priv
->tuned_sample
);
438 dev_info(host
->dev
, "Setting of SDR50 timing is not been!!\n");
439 clksel
= SDMMC_CLKSEL_UP_SAMPLE(priv
->sdr_timing
, priv
->tuned_sample
);
441 dw_mci_ssclk_control(host
, 1);
444 clksel
= priv
->sdr_timing
;
447 host
->cclk_in
= wanted
;
449 /* Set clock timing for the requested speed mode */
450 dw_mci_exynos_set_clksel_timing(host
, clksel
);
452 /* Configure setting for HS400 */
453 dw_mci_exynos_config_hs400(host
, timing
);
455 /* Configure clock rate */
456 dw_mci_exynos_adjust_clock(host
, wanted
);
460 #define MHZ (1000 * 1000)
463 static int dw_mci_exynos_parse_dt(struct dw_mci
*host
)
465 struct dw_mci_exynos_priv_data
*priv
;
466 struct device_node
*np
= host
->dev
->of_node
;
468 u32 div
= 0, voltage_int_extra
= 0;
472 u32
*ciu_clkin_values
= NULL
;
477 priv
= devm_kzalloc(host
->dev
, sizeof(*priv
), GFP_KERNEL
);
479 dev_err(host
->dev
, "mem alloc failed for private data\n");
483 for (idx
= 0; idx
< ARRAY_SIZE(exynos_compat
); idx
++) {
484 if (of_device_is_compatible(np
, exynos_compat
[idx
].compatible
))
485 priv
->ctrl_type
= exynos_compat
[idx
].ctrl_type
;
488 if (of_property_read_u32(np
, "num-ref-clks", &ref_clk_size
)) {
489 dev_err(host
->dev
, "Getting a number of referece clock failed\n");
494 ref_clk
= devm_kzalloc(host
->dev
, ref_clk_size
* sizeof(*ref_clk
), GFP_KERNEL
);
496 dev_err(host
->dev
, "Mem alloc failed for reference clock table\n");
501 ciu_clkin_values
= devm_kzalloc(host
->dev
,
502 ref_clk_size
* sizeof(*ciu_clkin_values
), GFP_KERNEL
);
504 if (!ciu_clkin_values
) {
505 dev_err(host
->dev
, "Mem alloc failed for temporary clock values\n");
509 if (of_property_read_u32_array(np
, "ciu_clkin", ciu_clkin_values
, ref_clk_size
)) {
510 dev_err(host
->dev
, "Getting ciu_clkin values faild\n");
515 for (idx_ref
= 0; idx_ref
< ref_clk_size
; idx_ref
++, ref_clk
++, ciu_clkin_values
++) {
516 if (*ciu_clkin_values
> MHZ
)
517 *(ref_clk
) = (*ciu_clkin_values
);
519 *(ref_clk
) = (*ciu_clkin_values
) * MHZ
;
522 ref_clk
-= ref_clk_size
;
523 ciu_clkin_values
-= ref_clk_size
;
524 priv
->ref_clk
= ref_clk
;
526 if (of_get_property(np
, "card-detect", NULL
))
527 priv
->cd_gpio
= of_get_named_gpio(np
, "card-detect", 0);
531 /* Swapping clock drive strength */
532 of_property_read_u32(np
, "clk-drive-number", &priv
->clk_drive_number
);
534 priv
->pinctrl
= devm_pinctrl_get(host
->dev
);
536 if (IS_ERR(priv
->pinctrl
)) {
537 priv
->pinctrl
= NULL
;
539 priv
->clk_drive_base
= pinctrl_lookup_state(priv
->pinctrl
, "default");
540 priv
->clk_drive_str
[0] = pinctrl_lookup_state(priv
->pinctrl
, "fast-slew-rate-1x");
541 priv
->clk_drive_str
[1] = pinctrl_lookup_state(priv
->pinctrl
, "fast-slew-rate-2x");
542 priv
->clk_drive_str
[2] = pinctrl_lookup_state(priv
->pinctrl
, "fast-slew-rate-3x");
543 priv
->clk_drive_str
[3] = pinctrl_lookup_state(priv
->pinctrl
, "fast-slew-rate-4x");
544 priv
->clk_drive_str
[4] = pinctrl_lookup_state(priv
->pinctrl
, "fast-slew-rate-5x");
545 priv
->clk_drive_str
[5] = pinctrl_lookup_state(priv
->pinctrl
, "fast-slew-rate-6x");
547 for (i
= 0; i
< 6; i
++) {
548 if (IS_ERR(priv
->clk_drive_str
[i
]))
549 priv
->clk_drive_str
[i
] = NULL
;
553 of_property_read_u32(np
, "samsung,dw-mshc-ciu-div", &div
);
556 if (of_property_read_u32(np
, "samsung,voltage-int-extra", &voltage_int_extra
))
557 priv
->voltage_int_extra
= voltage_int_extra
;
559 ret
= of_property_read_u32_array(np
, "samsung,dw-mshc-sdr-timing", timing
, 4);
563 priv
->sdr_timing
= SDMMC_CLKSEL_TIMING(timing
[0], timing
[1], timing
[2], timing
[3]);
565 ret
= of_property_read_u32_array(np
, "samsung,dw-mshc-ddr-timing", timing
, 4);
569 priv
->ddr_timing
= SDMMC_CLKSEL_TIMING(timing
[0], timing
[1], timing
[2], timing
[3]);
571 of_property_read_u32(np
, "ignore-phase", &priv
->ignore_phase
);
572 if (of_find_property(np
, "bypass-for-allpass", NULL
))
573 priv
->ctrl_flag
|= DW_MMC_EXYNOS_BYPASS_FOR_ALL_PASS
;
574 if (of_find_property(np
, "use-enable-shift", NULL
))
575 priv
->ctrl_flag
|= DW_MMC_EXYNOS_ENABLE_SHIFT
;
577 id
= of_alias_get_id(host
->dev
->of_node
, "mshc");
581 ret
= of_property_read_u32_array(np
, "samsung,dw-mshc-hs200-timing", timing
, 4);
585 SDMMC_CLKSEL_TIMING(timing
[0], timing
[1], timing
[2], timing
[3]);
587 ret
= of_property_read_u32_array(np
, "samsung,dw-mshc-hs400-timing", timing
, 4);
592 SDMMC_CLKSEL_TIMING(timing
[0], timing
[1], timing
[2], timing
[3]);
594 ret
= of_property_read_u32_array(np
, "samsung,dw-mshc-hs400-ulp-timing", timing
, 4);
596 priv
->hs400_ulp_timing
=
597 SDMMC_CLKSEL_TIMING(timing
[0], timing
[1], timing
[2], timing
[3]);
602 of_property_read_u32(np
, "samsung,dw-mshc-hs400-delay-line", &priv
->delay_line
);
605 of_property_read_u32(np
,
606 "samsung,dw-mshc-hs400-tx-delay-line", &priv
->tx_delay_line
);
608 /* The fast RXCRC packet arrival time */
609 of_property_read_u32(np
,
610 "samsung,dw-mshc-txdt-crc-timer-fastlimit",
611 &priv
->hs400_tx_t_fastlimit
);
613 /* Initial value of the timeout down counter for RXCRC packet */
614 of_property_read_u32(np
,
615 "samsung,dw-mshc-txdt-crc-timer-initval",
616 &priv
->hs400_tx_t_initval
);
620 /* dwmmc2 : SD Card */
622 ret
= of_property_read_u32_array(np
, "samsung,dw-mshc-sdr50-timing", timing
, 4); /* SDR50 100Mhz */
625 SDMMC_CLKSEL_TIMING(timing
[0], timing
[1], timing
[2], timing
[3]);
627 priv
->sdr50_timing
= priv
->sdr_timing
;
631 ret
= of_property_read_u32_array(np
, "samsung,dw-mshc-sdr104-timing", timing
, 4); /* SDR104 200mhz */
633 priv
->sdr104_timing
=
634 SDMMC_CLKSEL_TIMING(timing
[0], timing
[1], timing
[2], timing
[3]);
636 priv
->sdr104_timing
= priv
->sdr_timing
;
648 static inline u8
dw_mci_exynos_get_clksmpl(struct dw_mci
*host
)
650 return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host
, CLKSEL
));
653 static inline void dw_mci_exynos_set_clksmpl(struct dw_mci
*host
, u8 sample
)
657 clksel
= mci_readl(host
, CLKSEL
);
658 clksel
= SDMMC_CLKSEL_UP_SAMPLE(clksel
, sample
);
659 mci_writel(host
, CLKSEL
, clksel
);
662 static inline u8
dw_mci_exynos_move_next_clksmpl(struct dw_mci
*host
)
667 clksel
= mci_readl(host
, CLKSEL
);
668 sample
= (clksel
+ 1) & 0x7;
669 clksel
= (clksel
& ~0x7) | sample
;
670 mci_writel(host
, CLKSEL
, clksel
);
674 static void dw_mci_set_quirk_endbit(struct dw_mci
*host
, s8 mid
)
679 clksel
= mci_readl(host
, CLKSEL
);
680 phase
= (((clksel
>> 24) & 0x7) + 1) << 1;
683 if (host
->verid
< DW_MMC_260A
&& (shift
* mid
) % 360 >= 225)
684 host
->quirks
|= DW_MCI_QUIRK_NO_DETECT_EBIT
;
686 host
->quirks
&= ~DW_MCI_QUIRK_NO_DETECT_EBIT
;
689 static void dw_mci_exynos_set_enable_shift(struct dw_mci
*host
, u32 sample
, bool fine_tune
)
691 u32 i
, j
, en_shift
, en_shift_phase
[3][4] = { {0, 0, 1, 0},
696 en_shift
= mci_readl(host
, HS400_ENABLE_SHIFT
)
697 & ~(DWMCI_ENABLE_SHIFT_MASK
);
699 for (i
= 0; i
< 3; i
++) {
700 for (j
= 1; j
< 4; j
++) {
701 if (sample
== en_shift_phase
[i
][j
]) {
702 en_shift
|= DWMCI_ENABLE_SHIFT(en_shift_phase
[i
][0]);
707 if ((en_shift
< 2) && fine_tune
)
709 mci_writel(host
, HS400_ENABLE_SHIFT
, en_shift
);
712 static u8
dw_mci_tuning_sampling(struct dw_mci
*host
)
714 struct dw_mci_exynos_priv_data
*priv
= host
->priv
;
718 clksel
= mci_readl(host
, CLKSEL
);
719 sample
= (clksel
+ 1) & 0x7;
720 clksel
= SDMMC_CLKSEL_UP_SAMPLE(clksel
, sample
);
722 if (priv
->ignore_phase
) {
723 for (i
= 0; i
< 8; i
++) {
724 if (priv
->ignore_phase
& (0x1 << sample
))
725 sample
= (sample
+ 1) & 0x7;
730 clksel
= (clksel
& 0xfffffff8) | sample
;
731 mci_writel(host
, CLKSEL
, clksel
);
733 if (phase6_en
& (0x1 << sample
) || phase7_en
& (0x1 << sample
))
734 sample_path_sel_en(host
, AXI_BURST_LEN
);
736 sample_path_sel_dis(host
, AXI_BURST_LEN
);
738 if (priv
->ctrl_flag
& DW_MMC_EXYNOS_ENABLE_SHIFT
)
739 dw_mci_exynos_set_enable_shift(host
, sample
, false);
744 /* initialize the clock sample to given value */
745 static void dw_mci_exynos_set_sample(struct dw_mci
*host
, u32 sample
, bool tuning
)
747 struct dw_mci_exynos_priv_data
*priv
= host
->priv
;
750 clksel
= mci_readl(host
, CLKSEL
);
751 clksel
= (clksel
& ~0x7) | SDMMC_CLKSEL_CCLK_SAMPLE(sample
);
752 mci_writel(host
, CLKSEL
, clksel
);
753 if (sample
== 6 || sample
== 7)
754 sample_path_sel_en(host
, AXI_BURST_LEN
);
756 sample_path_sel_dis(host
, AXI_BURST_LEN
);
758 if (priv
->ctrl_flag
& DW_MMC_EXYNOS_ENABLE_SHIFT
)
759 dw_mci_exynos_set_enable_shift(host
, sample
, false);
761 dw_mci_set_quirk_endbit(host
, clksel
);
764 static void dw_mci_set_fine_tuning_bit(struct dw_mci
*host
, bool is_fine_tuning
)
766 struct dw_mci_exynos_priv_data
*priv
= host
->priv
;
769 clksel
= mci_readl(host
, CLKSEL
);
770 clksel
= (clksel
& ~BIT(6));
771 sample
= (clksel
& 0x7);
773 if (is_fine_tuning
) {
774 host
->pdata
->is_fine_tuned
= true;
777 host
->pdata
->is_fine_tuned
= false;
778 mci_writel(host
, CLKSEL
, clksel
);
779 if (priv
->ctrl_flag
& DW_MMC_EXYNOS_ENABLE_SHIFT
) {
780 if (((sample
% 2) == 1) && is_fine_tuning
&& sample
!= 0x7)
781 dw_mci_exynos_set_enable_shift(host
, sample
, true);
783 dw_mci_exynos_set_enable_shift(host
, sample
, false);
787 /* read current clock sample offset */
788 static u32
dw_mci_exynos_get_sample(struct dw_mci
*host
)
790 u32 clksel
= mci_readl(host
, CLKSEL
);
792 return SDMMC_CLKSEL_CCLK_SAMPLE(clksel
);
795 static int __find_median_of_16bits(u32 orig_bits
, u16 mask
, u8 startbit
)
799 testbits
= orig_bits
;
800 for (i
= startbit
; i
< (16 + startbit
); i
++, testbits
>>= 1)
801 if ((testbits
& mask
) == mask
)
802 return SDMMC_CLKSEL_CCLK_FINE_SAMPLE(i
);
806 #define NUM_OF_MASK 7
807 static int find_median_of_16bits(struct dw_mci
*host
, unsigned int map
, bool force
)
809 struct dw_mci_exynos_priv_data
*priv
= host
->priv
;
813 u16 mask
[NUM_OF_MASK
] = { 0x1fff, 0x7ff, 0x1ff, 0x7f, 0x1f, 0xf, 0x7 };
814 /* Tuning during the center value is set to 3/2 */
815 int optimum
[NUM_OF_MASK
] = { 9, 7, 6, 5, 3, 2, 1 };
817 /* replicate the map so "arithimetic shift right" shifts in
818 * the same bits "again". e.g. portable "Rotate Right" bit operation.
820 if (map
== 0xFFFF && force
== false)
823 divratio
= (mci_readl(host
, CLKSEL
) >> 24) & 0x7;
824 dev_info(host
->dev
, "divratio: %d map: 0x %08x\n", divratio
, map
);
826 orig_bits
= map
| (map
<< 16);
829 if (!(priv
->ctrl_flag
& DW_MMC_EXYNOS_ENABLE_SHIFT
))
830 orig_bits
= orig_bits
& (orig_bits
>> 8);
833 for (i
= 0; i
< NUM_OF_MASK
; i
++) {
834 sel
= __find_median_of_16bits(orig_bits
, mask
[i
], optimum
[i
]);
842 static void exynos_dwmci_tuning_drv_st(struct dw_mci
*host
)
844 struct dw_mci_exynos_priv_data
*priv
= host
->priv
;
846 dev_info(host
->dev
, "Clock GPIO Drive Strength Value: x%d\n", (priv
->clk_drive_tuning
));
848 if (priv
->pinctrl
&& priv
->clk_drive_str
[priv
->clk_drive_tuning
- 1])
849 pinctrl_select_state(priv
->pinctrl
,
850 priv
->clk_drive_str
[priv
->clk_drive_tuning
- 1]);
854 * Test all 8 possible "Clock in" Sample timings.
855 * Create a bitmap of which CLock sample values work and find the "median"
856 * value. Apply it and remember that we found the best value.
858 static int dw_mci_exynos_execute_tuning(struct dw_mci_slot
*slot
, u32 opcode
,
859 struct dw_mci_tuning_data
*tuning_data
)
861 struct dw_mci
*host
= slot
->host
;
862 struct dw_mci_exynos_priv_data
*priv
= host
->priv
;
863 struct mmc_host
*mmc
= slot
->mmc
;
864 unsigned int tuning_loop
= MAX_TUNING_LOOP
;
865 unsigned int drv_str_retries
;
868 u8
*tuning_blk
; /* data read from device */
870 unsigned int sample_good
= 0; /* bit map of clock sample (0-7) */
871 u32 test_sample
= -1;
873 int best_sample
= 0, best_sample_ori
= 0;
875 bool is_fine_tuning
= false;
876 unsigned int abnormal_result
= 0xFFFF;
877 unsigned int temp_ignore_phase
= priv
->ignore_phase
;
878 int ffs_ignore_phase
= 0;
879 u8 all_pass_count
= 0;
882 while (temp_ignore_phase
) {
883 ffs_ignore_phase
= ffs(temp_ignore_phase
) - 1;
884 abnormal_result
&= ~(0x3 << (2 * ffs_ignore_phase
));
885 temp_ignore_phase
&= ~(0x1 << ffs_ignore_phase
);
888 /* Short circuit: don't tune again if we already did. */
889 if (host
->pdata
->tuned
) {
890 host
->drv_data
->misc_control(host
, CTRL_RESTORE_CLKSEL
, NULL
);
891 mci_writel(host
, CDTHRCTL
, host
->cd_rd_thr
<< 16 | 1);
892 dev_info(host
->dev
, "EN_SHIFT 0x %08x CLKSEL 0x %08x\n",
893 mci_readl(host
, HS400_ENABLE_SHIFT
), mci_readl(host
, CLKSEL
));
897 tuning_blk
= kmalloc(2 * tuning_data
->blksz
, GFP_KERNEL
);
901 test_sample
= orig_sample
= dw_mci_exynos_get_sample(host
);
902 host
->cd_rd_thr
= 512;
903 mci_writel(host
, CDTHRCTL
, host
->cd_rd_thr
<< 16 | 1);
906 * eMMC 4.5 spec section 6.6.7.1 says the device is guaranteed to
907 * complete 40 iteration of CMD21 in 150ms. So this shouldn't take
908 * longer than about 30ms or so....at least assuming most values
909 * work and don't time out.
912 if (host
->pdata
->io_mode
== MMC_TIMING_MMC_HS400
)
913 host
->quirks
|= DW_MCI_QUIRK_NO_DETECT_EBIT
;
915 dev_info(host
->dev
, "Tuning Abnormal_result 0x%08x.\n", abnormal_result
);
917 priv
->clk_drive_tuning
= priv
->clk_drive_number
;
918 drv_str_retries
= priv
->clk_drive_number
;
921 struct mmc_request mrq
;
922 struct mmc_command cmd
;
923 struct mmc_command stop
;
924 struct mmc_data data
;
925 struct scatterlist sg
;
930 memset(&cmd
, 0, sizeof(cmd
));
933 cmd
.flags
= MMC_RSP_R1
| MMC_CMD_ADTC
;
935 cmd
.busy_timeout
= 10; /* 2x * (150ms/40 + setup overhead) */
937 memset(&stop
, 0, sizeof(stop
));
938 stop
.opcode
= MMC_STOP_TRANSMISSION
;
940 stop
.flags
= MMC_RSP_R1B
| MMC_CMD_AC
;
943 memset(&data
, 0, sizeof(data
));
944 data
.blksz
= tuning_data
->blksz
;
946 data
.flags
= MMC_DATA_READ
;
951 memset(tuning_blk
, ~0U, tuning_data
->blksz
);
952 sg_init_one(&sg
, tuning_blk
, tuning_data
->blksz
);
954 memset(&mrq
, 0, sizeof(mrq
));
961 * DDR200 tuning Sequence with fine tuning setup
963 * 0. phase 0 (0 degree) + no fine tuning setup
965 * 1. phase 0 + fine tuning setup
967 * 2. phase 1 (90 degree) + no fine tuning setup
970 * 15. phase 7 + fine tuning setup
974 dw_mci_set_fine_tuning_bit(host
, is_fine_tuning
);
976 dw_mci_set_timeout(host
, dw_mci_calc_timeout(host
));
977 mmc_wait_for_req(mmc
, &mrq
);
979 pass_index
= (u8
) test_sample
* 2;
984 if (!cmd
.error
&& !data
.error
) {
986 * Verify the "tuning block" arrived (to host) intact.
987 * If yes, remember this sample value works.
989 if (host
->use_dma
== 1) {
990 sample_good
|= (1 << pass_index
);
993 (tuning_data
->blk_pattern
, tuning_blk
, tuning_data
->blksz
))
994 sample_good
|= (1 << pass_index
);
997 dev_info(&mmc
->class_dev
,
998 "Tuning error: cmd.error:%d, data.error:%d CLKSEL = 0x%08x, EN_SHIFT = 0x%08x\n",
999 cmd
.error
, data
.error
,
1000 mci_readl(host
, CLKSEL
), mci_readl(host
, HS400_ENABLE_SHIFT
));
1004 test_sample
= dw_mci_tuning_sampling(host
);
1006 is_fine_tuning
= !is_fine_tuning
;
1008 if (orig_sample
== test_sample
&& !is_fine_tuning
) {
1011 * Get at middle clock sample values.
1013 if (sample_good
== abnormal_result
)
1016 if (priv
->ctrl_flag
& DW_MMC_EXYNOS_BYPASS_FOR_ALL_PASS
)
1017 bypass
= (all_pass_count
> priv
->clk_drive_number
) ? true : false;
1020 dev_info(host
->dev
, "Bypassed for all pass at %d times\n",
1021 priv
->clk_drive_number
);
1022 sample_good
= abnormal_result
& 0xFFFF;
1026 best_sample
= find_median_of_16bits(host
, sample_good
, bypass
);
1028 if (best_sample
>= 0) {
1029 dev_info(host
->dev
, "sample_good: 0x%02x best_sample: 0x%02x\n",
1030 sample_good
, best_sample
);
1032 if (sample_good
!= abnormal_result
|| bypass
) {
1038 "Failed to find median value in sample good (0x%02x)\n",
1041 if (drv_str_retries
) {
1043 if (priv
->clk_drive_str
[0]) {
1044 exynos_dwmci_tuning_drv_st(host
);
1045 if (priv
->clk_drive_tuning
> 0)
1046 priv
->clk_drive_tuning
--;
1056 * To set sample value with mid, the value should be divided by 2,
1057 * because mid represents index in pass map extended.(8 -> 16 bits)
1058 * And that mid is odd number, means the selected case includes
1059 * using fine tuning.
1062 best_sample_ori
= best_sample
;
1065 if (host
->pdata
->io_mode
== MMC_TIMING_MMC_HS400
)
1066 host
->quirks
&= ~DW_MCI_QUIRK_NO_DETECT_EBIT
;
1069 host
->pdata
->clk_smpl
= priv
->tuned_sample
= best_sample
;
1070 if (host
->pdata
->only_once_tune
)
1071 host
->pdata
->tuned
= true;
1073 if (best_sample_ori
% 2)
1076 dw_mci_exynos_set_sample(host
, best_sample
, false);
1077 dw_mci_set_fine_tuning_bit(host
, false);
1079 /* Failed. Just restore and return error */
1080 dev_err(host
->dev
, "tuning err\n");
1081 mci_writel(host
, CDTHRCTL
, 0 << 16 | 0);
1082 dw_mci_exynos_set_sample(host
, orig_sample
, false);
1086 /* Rollback Clock drive strength */
1087 if (priv
->pinctrl
&& priv
->clk_drive_base
)
1088 pinctrl_select_state(priv
->pinctrl
, priv
->clk_drive_base
);
1090 dev_info(host
->dev
, "CLKSEL = 0x%08x, EN_SHIFT = 0x%08x\n",
1091 mci_readl(host
, CLKSEL
), mci_readl(host
, HS400_ENABLE_SHIFT
));
1097 static int dw_mci_exynos_request_ext_irq(struct dw_mci
*host
, irq_handler_t func
)
1099 struct dw_mci_exynos_priv_data
*priv
= host
->priv
;
1102 if (gpio_is_valid(priv
->cd_gpio
) && !gpio_request(priv
->cd_gpio
, "DWMCI_EXT_CD")) {
1103 ext_cd_irq
= gpio_to_irq(priv
->cd_gpio
);
1105 devm_request_irq(host
->dev
, ext_cd_irq
, func
,
1106 IRQF_TRIGGER_RISING
|
1107 IRQF_TRIGGER_FALLING
|
1108 IRQF_ONESHOT
, "tflash_det", host
) == 0) {
1109 dev_info(host
->dev
, "success to request irq for card detect.\n");
1110 enable_irq_wake(ext_cd_irq
);
1112 dev_info(host
->dev
, "cannot request irq for card detect.\n");
1118 static int dw_mci_exynos_check_cd(struct dw_mci
*host
)
1121 struct dw_mci_exynos_priv_data
*priv
= host
->priv
;
1123 if (gpio_is_valid(priv
->cd_gpio
)) {
1124 if (host
->pdata
->use_gpio_invert
)
1125 ret
= gpio_get_value(priv
->cd_gpio
) ? 1 : 0;
1127 ret
= gpio_get_value(priv
->cd_gpio
) ? 0 : 1;
1132 /* Common capabilities of Exynos4/Exynos5 SoC */
1133 static unsigned long exynos_dwmmc_caps
[4] = {
1134 MMC_CAP_1_8V_DDR
| MMC_CAP_8_BIT_DATA
| MMC_CAP_CMD23
,
1140 static int dw_mci_exynos_misc_control(struct dw_mci
*host
,
1141 enum dw_mci_misc_control control
, void *priv
)
1146 case CTRL_RESTORE_CLKSEL
:
1147 dw_mci_exynos_set_sample(host
, host
->pdata
->clk_smpl
, false);
1148 dw_mci_set_fine_tuning_bit(host
, host
->pdata
->is_fine_tuned
);
1150 case CTRL_REQUEST_EXT_IRQ
:
1151 ret
= dw_mci_exynos_request_ext_irq(host
, (irq_handler_t
) priv
);
1154 ret
= dw_mci_exynos_check_cd(host
);
1157 dev_err(host
->dev
, "dw_mmc exynos: wrong case\n");
1163 #ifdef CONFIG_MMC_DW_EXYNOS_FMP
1164 static int dw_mci_exynos_crypto_engine_cfg(struct dw_mci
*host
,
1166 struct mmc_data
*data
,
1167 struct page
*page
, int sector_offset
, bool cmdq_enabled
)
1169 return exynos_mmc_fmp_cfg(host
, desc
, data
, page
, sector_offset
, cmdq_enabled
);
1172 static int dw_mci_exynos_crypto_engine_clear(struct dw_mci
*host
, void *desc
, bool cmdq_enabled
)
1174 return exynos_mmc_fmp_clear(host
, desc
, cmdq_enabled
);
/* Look up the SMU (secure management unit) device for this host. */
static int dw_mci_exynos_access_control_get_dev(struct dw_mci *host)
{
	return exynos_mmc_smu_get_dev(host);
}
/* Apply the SMU secure configuration. */
static int dw_mci_exynos_access_control_sec_cfg(struct dw_mci *host)
{
	return exynos_mmc_smu_sec_cfg(host);
}
/* Initialise the SMU access-control block. */
static int dw_mci_exynos_access_control_init(struct dw_mci *host)
{
	return exynos_mmc_smu_init(host);
}
/* Abort any in-flight SMU operation (error-recovery path). */
static int dw_mci_exynos_access_control_abort(struct dw_mci *host)
{
	return exynos_mmc_smu_abort(host);
}
1197 static int dw_mci_exynos_access_control_resume(struct dw_mci
*host
)
1199 return exynos_mmc_smu_resume(host
);
1203 static const struct dw_mci_drv_data exynos_drv_data
= {
1204 .caps
= exynos_dwmmc_caps
,
1205 .num_caps
= ARRAY_SIZE(exynos_dwmmc_caps
),
1206 .init
= dw_mci_exynos_priv_init
,
1207 .set_ios
= dw_mci_exynos_set_ios
,
1208 .parse_dt
= dw_mci_exynos_parse_dt
,
1209 .execute_tuning
= dw_mci_exynos_execute_tuning
,
1210 .hwacg_control
= dw_mci_card_int_hwacg_ctrl
,
1211 .misc_control
= dw_mci_exynos_misc_control
,
1212 #ifdef CONFIG_MMC_DW_EXYNOS_FMP
1213 .crypto_engine_cfg
= dw_mci_exynos_crypto_engine_cfg
,
1214 .crypto_engine_clear
= dw_mci_exynos_crypto_engine_clear
,
1215 .access_control_get_dev
= dw_mci_exynos_access_control_get_dev
,
1216 .access_control_sec_cfg
= dw_mci_exynos_access_control_sec_cfg
,
1217 .access_control_init
= dw_mci_exynos_access_control_init
,
1218 .access_control_abort
= dw_mci_exynos_access_control_abort
,
1219 .access_control_resume
= dw_mci_exynos_access_control_resume
,
1222 .ssclk_control
= dw_mci_ssclk_control
,
1225 static const struct of_device_id dw_mci_exynos_match
[] = {
1226 {.compatible
= "samsung,exynos-dw-mshc",
1227 .data
= &exynos_drv_data
,},
1231 MODULE_DEVICE_TABLE(of
, dw_mci_exynos_match
);
1233 static int dw_mci_exynos_probe(struct platform_device
*pdev
)
1235 const struct dw_mci_drv_data
*drv_data
;
1236 const struct of_device_id
*match
;
1239 match
= of_match_node(dw_mci_exynos_match
, pdev
->dev
.of_node
);
1240 drv_data
= match
->data
;
1242 pm_runtime_get_noresume(&pdev
->dev
);
1243 pm_runtime_set_active(&pdev
->dev
);
1244 pm_runtime_enable(&pdev
->dev
);
1246 ret
= dw_mci_pltfm_register(pdev
, drv_data
);
1248 pm_runtime_disable(&pdev
->dev
);
1249 pm_runtime_set_suspended(&pdev
->dev
);
1250 pm_runtime_put_noidle(&pdev
->dev
);
1258 static int dw_mci_exynos_remove(struct platform_device
*pdev
)
1260 pm_runtime_disable(&pdev
->dev
);
1261 pm_runtime_set_suspended(&pdev
->dev
);
1262 pm_runtime_put_noidle(&pdev
->dev
);
1264 return dw_mci_pltfm_remove(pdev
);
1267 static const struct dev_pm_ops dw_mci_exynos_pmops
= {
1268 SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend
,
1269 pm_runtime_force_resume
)
1270 SET_RUNTIME_PM_OPS(dw_mci_runtime_suspend
,
1271 dw_mci_exynos_runtime_resume
,
1273 .resume_noirq
= dw_mci_exynos_resume_noirq
,
1274 .thaw_noirq
= dw_mci_exynos_resume_noirq
,
1275 .restore_noirq
= dw_mci_exynos_resume_noirq
,
1278 static struct platform_driver dw_mci_exynos_pltfm_driver
= {
1279 .probe
= dw_mci_exynos_probe
,
1280 .remove
= dw_mci_exynos_remove
,
1282 .name
= "dwmmc_exynos",
1283 .of_match_table
= dw_mci_exynos_match
,
1284 .pm
= &dw_mci_exynos_pmops
,
1288 module_platform_driver(dw_mci_exynos_pltfm_driver
);
1290 MODULE_DESCRIPTION("Samsung Specific DW-MSHC Driver Extension");
1291 MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com");
1292 MODULE_LICENSE("GPL v2");
1293 MODULE_ALIAS("platform:dwmmc_exynos");