battery: sec_battery: export {CURRENT/VOLTAGE}_MAX to sysfs
[GitHub/LineageOS/android_kernel_samsung_universal7580.git] / drivers / mmc / host / dw_mmc.c
CommitLineData
f95f3850
WN
1/*
2 * Synopsys DesignWare Multimedia Card Interface driver
3 * (Based on NXP driver for lpc 31xx)
4 *
5 * Copyright (C) 2009 NXP Semiconductors
6 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/blkdev.h>
15#include <linux/clk.h>
16#include <linux/debugfs.h>
17#include <linux/device.h>
18#include <linux/dma-mapping.h>
19#include <linux/err.h>
20#include <linux/init.h>
21#include <linux/interrupt.h>
22#include <linux/ioport.h>
23#include <linux/module.h>
24#include <linux/platform_device.h>
3c2a0909 25#include <linux/ratelimit.h>
f95f3850
WN
26#include <linux/seq_file.h>
27#include <linux/slab.h>
28#include <linux/stat.h>
29#include <linux/delay.h>
30#include <linux/irq.h>
31#include <linux/mmc/host.h>
32#include <linux/mmc/mmc.h>
3c2a0909
S
33#include <linux/mmc/card.h>
34#include <linux/mmc/sdio.h>
f95f3850 35#include <linux/mmc/dw_mmc.h>
3c2a0909 36#include <linux/mmc/sd.h>
f95f3850 37#include <linux/bitops.h>
c07946a3 38#include <linux/regulator/consumer.h>
1791b13e 39#include <linux/workqueue.h>
c91eab4b 40#include <linux/of.h>
55a6ceb2 41#include <linux/of_gpio.h>
3c2a0909
S
42#include <linux/smc.h>
43#include <plat/map-s5p.h>
f95f3850
WN
44
45#include "dw_mmc.h"
3c2a0909
S
46#include "dw_mmc-exynos.h"
47
48#ifdef CONFIG_MMC_DW_FMP_DM_CRYPT
49#include "../card/queue.h"
50#endif
51
52#ifdef CONFIG_MMC_DW_FMP_ECRYPT_FS
53#include "fmp_derive_iv.h"
54#if defined(CONFIG_SDP)
55#include <linux/pagemap.h>
56#endif
57#endif
f95f3850
WN
58
59/* Common flag combinations */
60#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DTO | SDMMC_INT_DCRC | \
61 SDMMC_INT_HTO | SDMMC_INT_SBE | \
62 SDMMC_INT_EBE)
63#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
64 SDMMC_INT_RESP_ERR)
65#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
66 DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
67#define DW_MCI_SEND_STATUS 1
68#define DW_MCI_RECV_STATUS 2
3c2a0909
S
69#define DW_MCI_DMA_THRESHOLD 4
70#define MMC_CHECK_CMDQ_MODE(host) \
71 (host->cur_slot && host->cur_slot->mmc && \
72 host->cur_slot->mmc->card && \
73 host->cur_slot->mmc->card->ext_csd.cmdq_mode_en)
f95f3850
WN
74
75#ifdef CONFIG_MMC_DW_IDMAC
3c2a0909
S
76#ifdef CONFIG_MMC_DW_64_IDMAC
77struct idmac_desc { /* 64bit */
78 u32 des0; /* Control Descriptor */
79#define IDMAC_DES0_DIC BIT(1)
80#define IDMAC_DES0_LD BIT(2)
81#define IDMAC_DES0_FD BIT(3)
82#define IDMAC_DES0_CH BIT(4)
83#define IDMAC_DES0_ER BIT(5)
84#define IDMAC_DES0_CES BIT(30)
85#define IDMAC_DES0_OWN BIT(31)
86 u32 des1;
87#define IDMAC_SET_BUFFER1_SIZE(d, s) \
88 ((d)->des2 = ((d)->des2 & 0x03ffe000) | ((s) & 0x1fff))
89 u32 des2;
90#define IDMAC_DES2_FKL BIT(26)
91#define IDMAC_DES2_DKL BIT(27)
92#define IDMAC_SET_FAS(d, v) \
93 ((d)->des2 = ((d)->des2 & 0xcfffffff) | v << 28)
94#define IDMAC_SET_DAS(d, v) \
95 ((d)->des2 = ((d)->des2 & 0x3fffffff) | v << 30)
96 u32 des3;
97 u32 des4;
98 u32 des5;
99 u32 des6;
100 u32 des7;
101 u32 des8;
102 u32 des9;
103 u32 des10;
104 u32 des11;
105 u32 des12;
106 u32 des13;
107 u32 des14;
108 u32 des15;
109 u32 des16;
110 u32 des17;
111 u32 des18;
112 u32 des19;
113 u32 des20;
114 u32 des21;
115 u32 des22;
116 u32 des23;
117 u32 des24;
118 u32 des25;
119 u32 des26;
120 u32 des27;
121 u32 des28;
122 u32 des29;
123 u32 des30;
124 u32 des31;
125#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
126#define IDMAC_SET_DESC_ADDR(d, a) \
127do { \
128 (d)->des6 = ((u32)(a)); \
129 (d)->des7 = ((u32)((a) >> 32)); \
130} while(0)
131#else
132#define IDMAC_SET_DESC_ADDR(d, a) \
133do { \
134 (d)->des7 = (a);
135} while(0)
136#endif
137};
138#else
f95f3850
WN
139struct idmac_desc {
140 u32 des0; /* Control Descriptor */
141#define IDMAC_DES0_DIC BIT(1)
142#define IDMAC_DES0_LD BIT(2)
143#define IDMAC_DES0_FD BIT(3)
144#define IDMAC_DES0_CH BIT(4)
145#define IDMAC_DES0_ER BIT(5)
146#define IDMAC_DES0_CES BIT(30)
147#define IDMAC_DES0_OWN BIT(31)
3c2a0909 148 u32 des1; /* 32bit Buffer sizes */
f95f3850 149#define IDMAC_SET_BUFFER1_SIZE(d, s) \
9b7bbe10 150 ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
3c2a0909
S
151 u32 des2; /* 32bit buffer 1 physical address */
152 u32 des3; /* 32bit buffer 2 physical address */
153 u32 des4; /* sector key */
154 u32 des5; /* Application Key 0 */
155 u32 des6; /* Application Key 1 */
156 u32 des7; /* Application Key 2 */
157#define IDMAC_SET_DESC_ADDR(d, a) \
158do { \
159 ((d)->des3 = ((u32)(a))
160} while(0)
f95f3850 161};
3c2a0909 162#endif
f95f3850
WN
163#endif /* CONFIG_MMC_DW_IDMAC */
164
3c2a0909
S
165#define DATA_RETRY 1
166#define MAX_RETRY_CNT 2
167#define DRTO 200
168#define DRTO_MON_PERIOD 50
169#define DW_MCI_BUSY_WAIT_TIMEOUT 250
170
171static struct dma_attrs dw_mci_direct_attrs;
172
173#if defined(CONFIG_MMC_DW_FMP_DM_CRYPT) || defined(CONFIG_MMC_DW_FMP_ECRYPT_FS)
174static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq,
175 enum dw_mci_state *state);
176#endif
177
178#if defined(CONFIG_MMC_DW_DEBUG)
179static struct dw_mci_debug_data dw_mci_debug __cacheline_aligned;
180
181/* Add sysfs for read cmd_logs */
/*
 * sysfs "show" for dwmci_debug: dump the 14 most recent command-log ring
 * entries for this host as fixed-width text records.
 */
static ssize_t dw_mci_debug_log_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	ssize_t total_len = 0;
	int j = 0, k = 0;	/* k: per-host padding index (single host here) */
	struct dw_mci_cmd_log *cmd_log;
	unsigned int offset;

	struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
	struct dw_mci *host = dw_mci_debug.host[mmc->index];

	/*
	 * print cmd_log from prev. 14 to last
	 */
	if (host->debug_info->en_logging & DW_MCI_DEBUG_ON_CMD) {
		/* start 13 entries back, wrapped to the ring size */
		offset = atomic_read(&host->debug_info->cmd_log_count) - 13;
		offset &= DWMCI_LOG_MAX - 1;
		total_len += snprintf(buf, PAGE_SIZE, "HOST%1d\n", mmc->index);
		buf += (sizeof(char) * 6);	/* advance past "HOSTn\n" */
		cmd_log = host->debug_info->cmd_log;
		/*
		 * NOTE(review): each record is 71 chars but every snprintf
		 * still passes PAGE_SIZE as the size, so the arithmetic
		 * assumes the 14 records always fit — not re-checked against
		 * remaining space.  Also `offset++` below is not re-masked,
		 * so a window crossing the ring end indexes past
		 * DWMCI_LOG_MAX - 1 — confirm DWMCI_LOG_MAX sizing covers it.
		 */
		for (j = 0; j < 14; j++) {
			total_len += snprintf(buf+(sizeof(char)*71*j)+
				(sizeof(char)*(2*k+6*(k+1))), PAGE_SIZE,
				"%04d:%2d,0x%08x,%04d,%016llu,%016llu,%02x,%04x,%03d.\n",
				offset,
				cmd_log[offset].cmd, cmd_log[offset].arg,
				cmd_log[offset].data_size, cmd_log[offset].send_time,
				cmd_log[offset].done_time, cmd_log[offset].seq_status,
				cmd_log[offset].rint_sts, cmd_log[offset].status_count);
			offset++;
		}
		total_len += snprintf(buf + (sizeof(char)*2), PAGE_SIZE, "\n\n");
		k++;
	}

	return total_len;
}
219
220static ssize_t dw_mci_debug_log_control(struct device *dev,
221 struct device_attribute *attr,
222 const char *buf,
223 size_t len)
224{
225 int enable = 0;
226 int ret = 0;
227 struct mmc_host *mmc = container_of(dev, struct mmc_host, class_dev);
228 struct dw_mci *host = dw_mci_debug.host[mmc->index];
229
230 ret = kstrtoint(buf, 0, &enable);
231 if (ret)
232 goto out;
233
234 host->debug_info->en_logging = enable;
235 printk("%s: en_logging is %d.\n",
236 mmc_hostname(host->cur_slot->mmc),
237 host->debug_info->en_logging);
238
239 out:
240 return len;
241}
242DEVICE_ATTR(dwmci_debug, 0777, dw_mci_debug_log_show, dw_mci_debug_log_control);
243
244/*
245 * new_cmd : has to be true Only send_command.(except CMD13)
246 * flags :
247 * 0x1 : send_cmd : start_command(all)
248 * 0x2 : resp(CD) : set done_time without data case
249 * 0x4 : data_done(DTO) : set done_time with data case
250 * 0x8 : error interrupt occurs : set rint_sts read from RINTSTS
f95f3850 251 */
3c2a0909
S
252static void dw_mci_debug_cmd_log(struct mmc_command *cmd, struct dw_mci *host,
253 bool new_cmd, u8 flags, u32 rintsts)
254{
255 int cpu = raw_smp_processor_id();
256 unsigned int count;
257 struct dw_mci_cmd_log *cmd_log;
f95f3850 258
3c2a0909
S
259 if (!host->debug_info || !(host->debug_info->en_logging & DW_MCI_DEBUG_ON_CMD))
260 return;
a70aaa64 261
3c2a0909 262 cmd_log = host->debug_info->cmd_log;
f95f3850 263
3c2a0909
S
264 if (!new_cmd) {
265 count = atomic_read(&host->debug_info->cmd_log_count) &
266 (DWMCI_LOG_MAX - 1);
267 if (flags & DW_MCI_FLAG_SEND_CMD) /* CMD13 */
268 cmd_log[count].status_count++;
269 if (flags & DW_MCI_FLAG_CD) {
270 cmd_log[count].seq_status |= DW_MCI_FLAG_CD;
271 cmd_log[count].done_time = cpu_clock(cpu);
272 }
273 if (flags & DW_MCI_FLAG_DTO) {
274 cmd_log[count].seq_status |= DW_MCI_FLAG_DTO;
275 cmd_log[count].done_time = cpu_clock(cpu);
276 }
277 if (flags & DW_MCI_FLAG_ERROR) {
278 cmd_log[count].seq_status |= DW_MCI_FLAG_ERROR;
279 cmd_log[count].rint_sts |= (rintsts & 0xFFFF);
280 }
281 } else {
282 count = atomic_inc_return(&host->debug_info->cmd_log_count) &
283 (DWMCI_LOG_MAX - 1);
284 cmd_log[count].cmd = cmd->opcode;
285 cmd_log[count].arg = cmd->arg;
286 if (cmd->data)
287 cmd_log[count].data_size = cmd->data->blocks;
288 else
289 cmd_log[count].data_size = 0;
f95f3850 290
3c2a0909
S
291 cmd_log[count].send_time = cpu_clock(cpu);
292
293 cmd_log[count].done_time = 0x0;
294 cmd_log[count].seq_status = DW_MCI_FLAG_SEND_CMD;
295 if (!flags & DW_MCI_FLAG_SEND_CMD)
296 cmd_log[count].seq_status |= DW_MCI_FLAG_NEW_CMD_ERR;
297
298 cmd_log[count].rint_sts = 0x0;
299 cmd_log[count].status_count = 0;
300 }
301}
302
303static void dw_mci_debug_req_log(struct dw_mci *host, struct mmc_request *mrq,
304 enum dw_mci_req_log_state log_state, enum dw_mci_state state)
305{
306 int cpu = raw_smp_processor_id();
307 unsigned int count;
308 struct dw_mci_req_log *req_log;
309
310 if (!host->debug_info || !(host->debug_info->en_logging & DW_MCI_DEBUG_ON_REQ))
311 return;
312
313 req_log = host->debug_info->req_log;
314
315 count = atomic_inc_return(&host->debug_info->req_log_count)
316 & (DWMCI_REQ_LOG_MAX - 1);
317 if (log_state == STATE_REQ_START) {
318 req_log[count].info0 = mrq->cmd->opcode;
319 req_log[count].info1 = mrq->cmd->arg;
320 if (mrq->data) {
321 req_log[count].info2 = (u32)mrq->data->blksz;
322 req_log[count].info3 = (u32)mrq->data->blocks;
323 } else {
324 req_log[count].info2 = 0;
325 req_log[count].info3 = 0;
326 }
327 } else {
328 req_log[count].info0 = host->cmd_status;
329 req_log[count].info1 = host->data_status;
330 req_log[count].info2 = 0;
331 req_log[count].info3 = 0;
332 }
333 req_log[count].log_state = log_state;
334 req_log[count].pending_events = host->pending_events;
335 req_log[count].completed_events = host->completed_events;
336 req_log[count].timestamp = cpu_clock(cpu);
337 req_log[count].state_cmd = (log_state == STATE_REQ_CMD_PROCESS) ?
338 state : host->state_cmd;
339 req_log[count].state_dat = (log_state == STATE_REQ_DATA_PROCESS) ?
340 state : host->state_dat;
341}
342
343static void dw_mci_debug_init(struct dw_mci *host)
344{
345 unsigned int host_index;
346 unsigned int info_index;
347
348 host_index = dw_mci_debug.host_count++;
349 if (host_index < DWMCI_DBG_NUM_HOST) {
350 dw_mci_debug.host[host_index] = host;
351 if (DWMCI_DBG_MASK_INFO & DWMCI_DBG_BIT_HOST(host_index)) {
352 static atomic_t temp_cmd_log_count = ATOMIC_INIT(-1);
353 static atomic_t temp_req_log_count = ATOMIC_INIT(-1);
354 int sysfs_err = 0;
355
356 info_index = dw_mci_debug.info_count++;
357 dw_mci_debug.info_index[host_index] = info_index;
358 host->debug_info = &dw_mci_debug.debug_info[info_index];
359 host->debug_info->en_logging = DW_MCI_DEBUG_ON_CMD
360 | DW_MCI_DEBUG_ON_REQ;
361 host->debug_info->cmd_log_count = temp_cmd_log_count;
362 host->debug_info->req_log_count = temp_req_log_count;
363
364 sysfs_err = sysfs_create_file(&(host->slot[0]->mmc->class_dev.kobj),
365 &(dev_attr_dwmci_debug.attr));
366 pr_info("%s: create debug_log sysfs : %s.....\n", __func__,
367 sysfs_err ? "failed" : "successed");
368 dev_info(host->dev, "host %d debug On\n", host_index);
369 } else {
370 dw_mci_debug.info_index[host_index] = 0xFF;
371 }
372 }
373}
374#else
375static inline int dw_mci_debug_cmd_log(struct mmc_command *cmd,
376 struct dw_mci *host, bool new_cmd, u8 flags, u32 rintsts)
377{
378 return 0;
379}
380
381static inline int dw_mci_debug_req_log(struct dw_mci *host,
382 struct mmc_request *mrq, enum dw_mci_req_log_state log_state,
383 enum dw_mci_state state)
384{
385 return 0;
386}
387
388static inline int dw_mci_debug_init(struct dw_mci *host)
389{
390 return 0;
391}
392#endif
393
394int dw_mci_ciu_clk_en(struct dw_mci *host, bool force_gating)
395{
396 int ret = 0;
397 struct clk *gate_clk = (!IS_ERR(host->gate_clk)) ? host->gate_clk :
398 ((!IS_ERR(host->ciu_clk)) ? host->ciu_clk : NULL);
399
400 if (!host->pdata->use_gate_clock && !force_gating)
401 return 0;
402
403 if (!gate_clk) {
404 dev_err(host->dev, "no available CIU gating clock\n");
405 return 1;
406 }
407
408 if (!atomic_cmpxchg(&host->ciu_clk_cnt, 0, 1)) {
409 ret = clk_prepare_enable(gate_clk);
410 if (ret)
411 dev_err(host->dev, "failed to enable ciu clock\n");
412 }
413
414 return ret;
415}
416EXPORT_SYMBOL(dw_mci_ciu_clk_en);
417
418void dw_mci_ciu_clk_dis(struct dw_mci *host)
419{
420 struct clk *gate_clk = (!IS_ERR(host->gate_clk)) ? host->gate_clk :
421 ((!IS_ERR(host->ciu_clk)) ? host->ciu_clk : NULL);
422
423 BUG_ON(!gate_clk);
424
425 if (!host->pdata->use_gate_clock)
426 return;
427
428 if (host->pdata->enable_cclk_on_suspend && host->pdata->on_suspend)
429 return;
430
431 if (atomic_read(&host->ciu_en_win)) {
432 dev_err(host->dev, "Not available CIU off: %d\n",
433 atomic_read(&host->ciu_en_win));
434 return;
435 }
436
437 if (host->req_state == DW_MMC_REQ_BUSY)
438 return;
439
440 if (atomic_cmpxchg(&host->ciu_clk_cnt, 1, 0))
441 clk_disable_unprepare(gate_clk);
442}
443EXPORT_SYMBOL(dw_mci_ciu_clk_dis);
444
445int dw_mci_biu_clk_en(struct dw_mci *host, bool force_gating)
446{
447 int ret = 0;
448
449 if (!host->pdata->use_biu_gate_clock && !force_gating)
450 return 0;
451
452 if (!atomic_read(&host->biu_clk_cnt)) {
453 ret = clk_prepare_enable(host->biu_clk);
454 atomic_inc_return(&host->biu_clk_cnt);
455 if (ret)
456 dev_err(host->dev, "failed to enable biu clock\n");
457 }
458
459 return ret;
460}
461EXPORT_SYMBOL(dw_mci_biu_clk_en);
462
463void dw_mci_biu_clk_dis(struct dw_mci *host)
464{
465 if (!host->pdata->use_biu_gate_clock)
466 return;
467
468 if (host->pdata->enable_cclk_on_suspend && host->pdata->on_suspend)
469 return;
470
471 if (atomic_read(&host->biu_en_win)) {
472 dev_dbg(host->dev, "Not available BIU off: %d\n",
473 atomic_read(&host->biu_en_win));
474 return;
475 }
476
477 if (host->req_state == DW_MMC_REQ_BUSY)
478 return;
479
480 if (atomic_read(&host->biu_clk_cnt)) {
481 clk_disable_unprepare(host->biu_clk);
482 atomic_dec_return(&host->biu_clk_cnt);
483 }
484}
485EXPORT_SYMBOL(dw_mci_biu_clk_dis);
f95f3850
WN
486
487#if defined(CONFIG_DEBUG_FS)
488static int dw_mci_req_show(struct seq_file *s, void *v)
489{
490 struct dw_mci_slot *slot = s->private;
491 struct mmc_request *mrq;
492 struct mmc_command *cmd;
493 struct mmc_command *stop;
494 struct mmc_data *data;
495
496 /* Make sure we get a consistent snapshot */
497 spin_lock_bh(&slot->host->lock);
498 mrq = slot->mrq;
499
500 if (mrq) {
501 cmd = mrq->cmd;
502 data = mrq->data;
503 stop = mrq->stop;
504
505 if (cmd)
506 seq_printf(s,
507 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
508 cmd->opcode, cmd->arg, cmd->flags,
509 cmd->resp[0], cmd->resp[1], cmd->resp[2],
510 cmd->resp[2], cmd->error);
511 if (data)
512 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
513 data->bytes_xfered, data->blocks,
514 data->blksz, data->flags, data->error);
515 if (stop)
516 seq_printf(s,
517 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
518 stop->opcode, stop->arg, stop->flags,
519 stop->resp[0], stop->resp[1], stop->resp[2],
520 stop->resp[2], stop->error);
521 }
522
523 spin_unlock_bh(&slot->host->lock);
524
525 return 0;
526}
527
/* debugfs open hook: bind dw_mci_req_show to the slot from i_private. */
static int dw_mci_req_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_req_show, inode->i_private);
}

/* Read-only seq_file operations for the per-slot "req" debugfs entry. */
static const struct file_operations dw_mci_req_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_req_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
540
541static int dw_mci_regs_show(struct seq_file *s, void *v)
542{
543 seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
544 seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
545 seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
546 seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
547 seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
548 seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
549
550 return 0;
551}
552
/* debugfs open hook: bind dw_mci_regs_show to the host from i_private. */
static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, dw_mci_regs_show, inode->i_private);
}

/* Read-only seq_file operations for the "regs" debugfs entry. */
static const struct file_operations dw_mci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= dw_mci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
565
566static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
567{
568 struct mmc_host *mmc = slot->mmc;
569 struct dw_mci *host = slot->host;
570 struct dentry *root;
571 struct dentry *node;
572
573 root = mmc->debugfs_root;
574 if (!root)
575 return;
576
577 node = debugfs_create_file("regs", S_IRUSR, root, host,
578 &dw_mci_regs_fops);
579 if (!node)
580 goto err;
581
582 node = debugfs_create_file("req", S_IRUSR, root, slot,
583 &dw_mci_req_fops);
584 if (!node)
585 goto err;
586
3c2a0909
S
587 node = debugfs_create_u32("state", S_IRUSR, root,
588 (u32 *)&host->state_cmd);
f95f3850
WN
589 if (!node)
590 goto err;
591
592 node = debugfs_create_x32("pending_events", S_IRUSR, root,
593 (u32 *)&host->pending_events);
594 if (!node)
595 goto err;
596
597 node = debugfs_create_x32("completed_events", S_IRUSR, root,
598 (u32 *)&host->completed_events);
599 if (!node)
600 goto err;
601
602 return;
603
604err:
605 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
606}
607#endif /* defined(CONFIG_DEBUG_FS) */
608
3c2a0909
S
/* Decode and dev_err the interesting CMD register fields for error dumps. */
void dw_mci_cmd_reg_summary(struct dw_mci *host)
{
	u32 reg;
	reg = mci_readl(host, CMD);

	dev_err(host->dev, ": ================= CMD REG =================\n");
	dev_err(host->dev, ": read/write : %s\n",
		(reg & (0x1 << 10)) ? "write" : "read");
	dev_err(host->dev, ": data expected : %d\n", (reg >> 9) & 0x1);
	dev_err(host->dev, ": cmd index : %d\n", (reg >> 0) & 0x3f);
}
620
/* Decode and dev_err the STATUS register bit fields for error dumps. */
void dw_mci_status_reg_summary(struct dw_mci *host)
{
	u32 reg;
	reg = mci_readl(host, STATUS);

	dev_err(host->dev, ": ================ STATUS REG ===============\n");
	dev_err(host->dev, ": fifocount : %d\n", (reg >> 17) & 0x1fff);
	dev_err(host->dev, ": response index : %d\n", (reg >> 11) & 0x3f);
	dev_err(host->dev, ": data state mc busy: %d\n", (reg >> 10) & 0x1);
	dev_err(host->dev, ": data busy : %d\n", (reg >> 9) & 0x1);
	dev_err(host->dev, ": data 3 state : %d\n", (reg >> 8) & 0x1);
	dev_err(host->dev, ": command fsm state : %d\n", (reg >> 4) & 0xf);
	dev_err(host->dev, ": fifo full : %d\n", (reg >> 3) & 0x1);
	dev_err(host->dev, ": fifo empty : %d\n", (reg >> 2) & 0x1);
	dev_err(host->dev, ": fifo tx watermark : %d\n", (reg >> 1) & 0x1);
	dev_err(host->dev, ": fifo rx watermark : %d\n", (reg >> 0) & 0x1);
}
638
639u32 dw_mci_disable_interrupt(struct dw_mci *host, unsigned int *int_mask)
640{
641 u32 ctrl;
642
643 ctrl = mci_readl(host, CTRL);
644 ctrl &= ~(SDMMC_CTRL_INT_ENABLE);
645 mci_writel(host, CTRL, ctrl);
646
647 *int_mask = mci_readl(host, INTMASK);
648
649 mci_writel(host, INTMASK, 0);
650
651 return ctrl;
652}
653
654void dw_mci_enable_interrupt(struct dw_mci *host, unsigned int int_mask)
655{
656 unsigned int ctrl;
657 mci_writel(host, INTMASK, int_mask);
658
659 ctrl = mci_readl(host, CTRL);
660 mci_writel(host, CTRL, ctrl | SDMMC_CTRL_INT_ENABLE);
661}
662
663static inline bool dw_mci_stop_abort_cmd(struct mmc_command *cmd)
f95f3850 664{
3c2a0909
S
665 u32 op = cmd->opcode;
666
667 if ((op == MMC_STOP_TRANSMISSION) ||
668 (op == MMC_GO_IDLE_STATE) ||
669 (op == MMC_GO_INACTIVE_STATE) ||
670 ((op == SD_IO_RW_DIRECT) && (cmd->arg & 0x80000000) &&
671 ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
672 return true;
673 return false;
f95f3850
WN
674}
675
676static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
677{
678 struct mmc_data *data;
800d78bf 679 struct dw_mci_slot *slot = mmc_priv(mmc);
3c2a0909 680 struct dw_mci *host = slot->host;
e95baf13 681 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
3c2a0909 682 u32 cmdr, argr;
f95f3850
WN
683 cmd->error = -EINPROGRESS;
684
685 cmdr = cmd->opcode;
3c2a0909 686 argr = ((cmd->arg >> 9) & 0x1FFFF);
f95f3850
WN
687
688 if (cmdr == MMC_STOP_TRANSMISSION)
689 cmdr |= SDMMC_CMD_STOP;
3c2a0909
S
690 else if (cmdr != MMC_SEND_STATUS &&
691 cmdr != MMC_SET_QUEUE_CONTEXT &&
692 cmdr != MMC_QUEUE_READ_ADDRESS)
f95f3850
WN
693 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
694
3c2a0909
S
695 if ((cmd->opcode == SD_IO_RW_DIRECT) &&
696 (argr == SDIO_CCCR_ABORT)) {
697 cmdr &= ~SDMMC_CMD_PRV_DAT_WAIT;
698 cmdr |= SDMMC_CMD_STOP;
699 }
700
f95f3850
WN
701 if (cmd->flags & MMC_RSP_PRESENT) {
702 /* We expect a response, so set this bit */
703 cmdr |= SDMMC_CMD_RESP_EXP;
704 if (cmd->flags & MMC_RSP_136)
705 cmdr |= SDMMC_CMD_RESP_LONG;
706 }
707
708 if (cmd->flags & MMC_RSP_CRC)
709 cmdr |= SDMMC_CMD_RESP_CRC;
710
3c2a0909
S
711 if (host->quirks & DW_MMC_QUIRK_SW_DATA_TIMEOUT)
712 cmdr |= SDMMC_CMD_CEATA_RD;
713
f95f3850
WN
714 data = cmd->data;
715 if (data) {
716 cmdr |= SDMMC_CMD_DAT_EXP;
717 if (data->flags & MMC_DATA_STREAM)
718 cmdr |= SDMMC_CMD_STRM_MODE;
719 if (data->flags & MMC_DATA_WRITE)
720 cmdr |= SDMMC_CMD_DAT_WR;
721 }
722
cb27a843
JH
723 if (drv_data && drv_data->prepare_command)
724 drv_data->prepare_command(slot->host, &cmdr);
800d78bf 725
f95f3850
WN
726 return cmdr;
727}
728
3c2a0909
S
729static u32 dw_mci_prep_stop(struct dw_mci *host, struct mmc_command *cmd)
730{
731 struct mmc_command *stop = &host->stop;
732 const struct dw_mci_drv_data *drv_data = host->drv_data;
733 u32 cmdr = cmd->opcode;
734
735 memset(stop, 0, sizeof(struct mmc_command));
736
737 if (cmdr == MMC_READ_SINGLE_BLOCK ||
738 cmdr == MMC_READ_MULTIPLE_BLOCK ||
739 cmdr == MMC_WRITE_BLOCK ||
740 cmdr == MMC_WRITE_MULTIPLE_BLOCK) {
741 stop->opcode = MMC_STOP_TRANSMISSION;
742 stop->arg = 0;
743 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
744 } else if (cmdr == SD_IO_RW_EXTENDED) {
745 stop->opcode = SD_IO_RW_DIRECT;
746 stop->arg = 0x80000000;
747 /* stop->arg &= ~(1 << 28); */
748 stop->arg |= (cmd->arg >> 28) & 0x7;
749 stop->arg |= SDIO_CCCR_ABORT << 9;
750 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
751 } else
752 return 0;
753
754 cmdr = stop->opcode | SDMMC_CMD_STOP |
755 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
756
757 /* Use hold bit register */
758 if (drv_data && drv_data->prepare_command)
759 drv_data->prepare_command(host, &cmdr);
760
761 return cmdr;
762}
763
f95f3850
WN
764static void dw_mci_start_command(struct dw_mci *host,
765 struct mmc_command *cmd, u32 cmd_flags)
766{
3c2a0909
S
767 struct mmc_data *data;
768 u32 mask;
769
f95f3850 770 host->cmd = cmd;
3c2a0909
S
771 data = cmd->data;
772 mask = mci_readl(host, INTMASK);
773
4a90920c 774 dev_vdbg(host->dev,
f95f3850
WN
775 "start command: ARGR=0x%08x CMDR=0x%08x\n",
776 cmd->arg, cmd_flags);
777
3c2a0909
S
778 if ((host->quirks & DW_MCI_QUIRK_NO_DETECT_EBIT) &&
779 data && (data->flags & MMC_DATA_READ)) {
780 mask &= ~SDMMC_INT_EBE;
781 } else {
782 mask |= SDMMC_INT_EBE;
783 mci_writel(host, RINTSTS, SDMMC_INT_EBE);
784 }
785
786 mci_writel(host, INTMASK, mask);
787
788 if (MMC_CHECK_CMDQ_MODE(host)) {
789 if (mci_readl(host, SHA_CMD_IS) & QRDY_INT)
790 mci_writel(host, SHA_CMD_IS, QRDY_INT);
791 else {
792 mask = mci_readl(host, SHA_CMD_IE);
793 if (!(mask & QRDY_INT_EN)) {
794 mask |= QRDY_INT_EN;
795 mci_writel(host, SHA_CMD_IE, mask);
796 }
797 }
798 }
799
800 /* needed to
801 * add get node parse_dt for check to enable logging
802 * if defined(CMD_LOGGING)
803 * set en_logging to true
804 * init cmd_log_count
805 */
806 if (cmd->opcode == MMC_SEND_STATUS)
807 dw_mci_debug_cmd_log(cmd, host, false, DW_MCI_FLAG_SEND_CMD, 0);
808 else
809 dw_mci_debug_cmd_log(cmd, host, true, DW_MCI_FLAG_SEND_CMD, 0);
810
f95f3850
WN
811 mci_writel(host, CMDARG, cmd->arg);
812 wmb();
813
814 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
815}
816
/* Issue the request's stop command using the cached stop_cmdr encoding. */
static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
{
	dw_mci_start_command(host, data->stop, host->stop_cmdr);
}
821
822/* DMA interface functions */
823static void dw_mci_stop_dma(struct dw_mci *host)
824{
03e8cb53 825 if (host->using_dma) {
f95f3850
WN
826 host->dma_ops->stop(host);
827 host->dma_ops->cleanup(host);
3c2a0909 828 host->dma_ops->reset(host);
f95f3850
WN
829 } else {
830 /* Data transfer was stopped by the interrupt handler */
831 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
832 }
833}
834
3c2a0909
S
835static bool dw_mci_wait_reset(struct device *dev, struct dw_mci *host,
836 u32 reset_val)
837{
838 unsigned long timeout = jiffies + msecs_to_jiffies(500);
839 u32 ctrl;
840 unsigned int int_mask = 0;
841
842 /* Interrupt disable */
843 ctrl = dw_mci_disable_interrupt(host, &int_mask);
844
845 /* Reset */
846 ctrl |= reset_val;
847 mci_writel(host, CTRL, ctrl);
848
849 /* All interrupt clear */
850 mci_writel(host, RINTSTS, 0xFFFFFFFF);
851
852 /* Interrupt enable */
853 dw_mci_enable_interrupt(host, int_mask);
854
855 /* wait till resets clear */
856 do {
857 if (!(mci_readl(host, CTRL) & reset_val))
858 return true;
859 } while (time_before(jiffies, timeout));
860
861 dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
862
863 return false;
864}
865
/*
 * Latch a new clock setting into the CIU by issuing an UPD_CLK-only
 * command; retries (with a controller reset) when the start bit is not
 * accepted or a hardware-locked error (HLE) is raised.
 */
static void dw_mci_update_clock(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout;
	int retry = 10;
	unsigned int int_mask = 0;
	unsigned int cmd_status = 0;

	/* keep the CIU clock running while we poke the CMD register */
	if (slot->host->pdata->use_gate_clock)
		atomic_inc_return(&slot->host->ciu_en_win);
	dw_mci_ciu_clk_en(slot->host, false);
	if (slot->host->pdata->use_gate_clock)
		atomic_dec_return(&slot->host->ciu_en_win);

	dw_mci_disable_interrupt(host, &int_mask);

	do {
		wmb();
		mci_writel(host, CMD, SDMMC_CMD_START | SDMMC_CMD_UPD_CLK);

		timeout = jiffies + msecs_to_jiffies(1);
		while (time_before(jiffies, timeout)) {
			cmd_status = mci_readl(host, CMD) & SDMMC_CMD_START;
			if (!cmd_status)
				goto out;

			/* reset controller because a command is stuck */
			if (mci_readl(host, RINTSTS) & SDMMC_INT_HLE) {
				mci_writel(host, RINTSTS, SDMMC_INT_HLE);
				break;
			}
		}

		dw_mci_wait_reset(host->dev, host, SDMMC_CTRL_RESET);
	} while (--retry);

	dev_err(&slot->mmc->class_dev,
		"Timeout updating command (status %#x)\n", cmd_status);
out:
	/* recover interrupt mask after updating clock */
	dw_mci_enable_interrupt(host, int_mask);
}
908
/*
 * Full CIU recovery: reset the controller, then poll (re-resetting, up to
 * 10 retries of 10 ms each) until the card deasserts DATA busy, and
 * finally re-latch the clock.  No-op when there is no current slot.
 */
void dw_mci_ciu_reset(struct device *dev, struct dw_mci *host)
{
	struct dw_mci_slot *slot = host->cur_slot;
	unsigned long timeout = jiffies + msecs_to_jiffies(10);
	int retry = 10;
	u32 status;

	if (slot) {
		dw_mci_wait_reset(dev, host, SDMMC_CTRL_RESET);
		/* Check For DATA busy */
		do {

			while (time_before(jiffies, timeout)) {
				status = mci_readl(host, STATUS);
				if (!(status & SDMMC_DATA_BUSY))
					goto out;
			}

			/* still busy: reset again and restart the window */
			dw_mci_wait_reset(host->dev, host, SDMMC_CTRL_RESET);
			timeout = jiffies + msecs_to_jiffies(10);
		} while (--retry);

out:
		dw_mci_update_clock(slot);
	}
}
935
/*
 * Reset the FIFO after an error, retrying for up to one second.  Once the
 * DMA request line is idle a second reset is issued, and raw interrupt
 * bits that no handler will consume are cleared before returning.
 * Returns true on success, false on timeout.
 */
bool dw_mci_fifo_reset(struct device *dev, struct dw_mci *host)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	unsigned int ctrl;
	bool result;

	do {
		result = dw_mci_wait_reset(host->dev, host, SDMMC_CTRL_FIFO_RESET);

		if (!result)
			break;

		ctrl = mci_readl(host, STATUS);
		if (!(ctrl & SDMMC_STATUS_DMA_REQ)) {
			result = dw_mci_wait_reset(host->dev, host,
					SDMMC_CTRL_FIFO_RESET);
			if (result) {
				/* clear exception raw interrupts can not be handled
				   ex) fifo full => RXDR interrupt rising */
				ctrl = mci_readl(host, RINTSTS);
				ctrl = ctrl & ~(mci_readl(host, MINTSTS));
				if (ctrl)
					mci_writel(host, RINTSTS, ctrl);

				return true;
			}
		}
	} while (time_before(jiffies, timeout));

	dev_err(dev, "%s: Timeout while resetting host controller after err\n",
		__func__);

	return false;
}
970
9aa51408
SJ
971static int dw_mci_get_dma_dir(struct mmc_data *data)
972{
973 if (data->flags & MMC_DATA_WRITE)
974 return DMA_TO_DEVICE;
975 else
976 return DMA_FROM_DEVICE;
977}
978
9beee912 979#ifdef CONFIG_MMC_DW_IDMAC
f95f3850
WN
980static void dw_mci_dma_cleanup(struct dw_mci *host)
981{
982 struct mmc_data *data = host->data;
983
984 if (data)
9aa51408 985 if (!data->host_cookie)
4a90920c 986 dma_unmap_sg(host->dev,
9aa51408
SJ
987 data->sg,
988 data->sg_len,
989 dw_mci_get_dma_dir(data));
f95f3850
WN
990}
991
992static void dw_mci_idmac_stop_dma(struct dw_mci *host)
993{
994 u32 temp;
995
996 /* Disable and reset the IDMAC interface */
997 temp = mci_readl(host, CTRL);
998 temp &= ~SDMMC_CTRL_USE_IDMAC;
f95f3850
WN
999 mci_writel(host, CTRL, temp);
1000
3c2a0909
S
1001 /* reset the IDMAC interface */
1002 dw_mci_wait_reset(host->dev, host, SDMMC_CTRL_DMA_RESET);
1003
f95f3850
WN
1004 /* Stop the IDMAC running */
1005 temp = mci_readl(host, BMOD);
a5289a43 1006 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
f95f3850
WN
1007 mci_writel(host, BMOD, temp);
1008}
1009
3c2a0909
S
1010static void dw_mci_idma_reset_dma(struct dw_mci *host)
1011{
1012 u32 temp;
1013
1014 temp = mci_readl(host, BMOD);
1015 /* Software reset of DMA */
1016 temp |= SDMMC_IDMAC_SWRESET;
1017 mci_writel(host, BMOD, temp);
1018}
1019
f95f3850
WN
/*
 * DMA-complete bottom half: clean up the mapping, scrub FMP crypto key
 * material left in the descriptors (SDP/64-bit builds only), and kick the
 * state-machine tasklet when the request is still alive.
 */
static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
	struct mmc_data *data = host->data;
#if defined(CONFIG_MMC_DW_FMP_ECRYPT_FS) && defined(CONFIG_SDP) && defined(CONFIG_64BIT)
	struct idmac_desc *desc = host->sg_cpu;
	unsigned int i, j;
#endif

	dev_vdbg(host->dev, "DMA complete\n");

	host->dma_ops->cleanup(host);

#if defined(CONFIG_MMC_DW_FMP_ECRYPT_FS) && defined(CONFIG_SDP) && defined(CONFIG_64BIT)
	if (data && data->sg_len) {
		for(i = 0; i < data->sg_len; i++) {
			/*
			 * A set LSB in ->mapping marks an anonymous page;
			 * index >= 2 presumably skips file metadata pages —
			 * TODO confirm against the eCryptfs/FMP layer.
			 * NOTE(review): this walk assumes encrypted sg
			 * entries were split into per-sector descriptors
			 * while plain entries used exactly one, mirroring
			 * dw_mci_translate_sglist; confirm both stay in sync
			 * or `desc` drifts off the descriptor chain.
			 */
			if (sg_page(&data->sg[i])->mapping && !((unsigned long)(sg_page(&data->sg[i])->mapping) & 0x1)
					&& sg_page(&data->sg[i])->mapping->key &&
					((unsigned int)sg_page(&data->sg[i])->index >= 2)) {
				for (j = 0; j < DW_MMC_MAX_TRANSFER_SIZE/DW_MMC_SECTOR_SIZE; j++) {
					/* wipe key words starting at des12 */
					if (mapping_sensitive(sg_page(&data->sg[i])->mapping))
						memset(&(desc->des12), 0x0, sizeof(u32)*(4 + (sg_page(&data->sg[i])->mapping->key_length >> 2)));
					desc++;
				}
			} else
				desc++;
		}
	}
#endif
	/*
	 * If the card was removed, data will be NULL. No point in trying to
	 * send the stop command or waiting for NBUSY in this case.
	 */
	if (data) {
		set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}
1057
3c2a0909
S
1058#define byte2word(b0, b1, b2, b3) \
1059 ((unsigned int)(b0) << 24) | ((unsigned int)(b1) << 16) | ((unsigned int)(b2) << 8) | (b3)
1060#define word_in(x, c) byte2word(((unsigned char *)(x) + 4 * (c))[0], ((unsigned char *)(x) + 4 * (c))[1], \
1061 ((unsigned char *)(x) + 4 * (c))[2], ((unsigned char *)(x) + 4 * (c))[3])
1062
1063#define CLEAR 0
1064#define AES_CBC 1
1065#define AES_XTS 2
1066
1067extern volatile unsigned int disk_key_flag;
1068extern spinlock_t disk_key_lock;
1069
f95f3850
WN
/*
 * Build the IDMAC descriptor chain for @sg_len scatterlist entries.
 *
 * Each sg entry is split into descriptors of at most rw_size bytes.
 * rw_size shrinks to one sector when the transfer must be encrypted
 * (disk encryption via the block layer's bi_sensitive_data flag, or
 * file encryption via the page mapping's key), because the FMP engine
 * needs per-sector IV/key programming in the descriptor.  The first and
 * last descriptors are then marked FD/LD and interrupts re-enabled on
 * the last one.
 *
 * NOTE(review): under CONFIG_MMC_DW_FMP_ECRYPT_FS without
 * CONFIG_MMC_DW_FMP_DM_CRYPT, the `mq_rq` reference below has no
 * declaration — confirm the supported config combinations.
 */
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
				    unsigned int sg_len)
{
	int i, j;
	int desc_cnt = 0;
	struct idmac_desc *desc = host->sg_cpu;
	unsigned int rw_size = DW_MMC_MAX_TRANSFER_SIZE;
#if defined(CONFIG_MMC_DW_FMP_DM_CRYPT) || defined(CONFIG_MMC_DW_FMP_ECRYPT_FS)
	unsigned int sector = 0;
	unsigned int sector_key = DW_MMC_BYPASS_SECTOR_BEGIN;
#if defined(CONFIG_MMC_DW_FMP_DM_CRYPT)
	struct mmc_blk_request *brq = NULL;
	struct mmc_queue_req *mq_rq = NULL;

	if ((data->mrq->host) &&
		(host->pdata->quirks & DW_MCI_QUIRK_USE_SMU)) {
		/* it means this request comes from block i/o */
		brq = container_of(data, struct mmc_blk_request, data);
		if (brq) {
			mq_rq = container_of(brq, struct mmc_queue_req, brq);
			/* Starting LBA and whether DM-crypt marked it sensitive. */
			sector = mq_rq->req->bio->bi_sector;
			rw_size = (mq_rq->req->bio->bi_sensitive_data == 1) ?
				DW_MMC_SECTOR_SIZE : DW_MMC_MAX_TRANSFER_SIZE;
			sector_key = (mq_rq->req->bio->bi_sensitive_data == 1) ?
				DW_MMC_ENCRYPTION_SECTOR_BEGIN : DW_MMC_BYPASS_SECTOR_BEGIN;
		}
	}
#endif
#endif
	for (i = 0; i < sg_len; i++) {
		unsigned int length = sg_dma_len(&data->sg[i]);
		unsigned int sz_per_desc;
		unsigned int left = length;
		u64 mem_addr = sg_dma_address(&data->sg[i]);
#if defined(CONFIG_MMC_DW_FMP_ECRYPT_FS)
		unsigned int last_index = 0;
		unsigned long last_inode = 0;
#ifdef CONFIG_CRYPTO_FIPS
		char extent_iv[SHA256_HASH_SIZE];
#else
		char extent_iv[MD5_DIGEST_SIZE];
#endif
		/*
		 * File-encrypted page: mapping pointer valid (low bit clear),
		 * carries a key, and the page is past the metadata area
		 * (index >= 2).  Forces per-sector descriptors.
		 */
		if (!((unsigned long)(sg_page(&data->sg[i])->mapping) & 0x1) && sg_page(&data->sg[i])->mapping
			&& sg_page(&data->sg[i])->mapping->key &&
			((unsigned int)sg_page(&data->sg[i])->index >= 2)) {
			sector_key |= DW_MMC_FILE_ENCRYPTION_SECTOR_BEGIN;
			rw_size = DW_MMC_SECTOR_SIZE;
		} else {
			sector_key &= ~DW_MMC_FILE_ENCRYPTION_SECTOR_BEGIN;
			if (mq_rq && !(mq_rq->req->bio->bi_sensitive_data))
				rw_size = DW_MMC_MAX_TRANSFER_SIZE;
		}
#endif
		for (j = 0; j < (length + rw_size - 1) / rw_size; j++) {
			/*
			 * Set the OWN bit
			 * and disable interrupts for this descriptor
			 */
			desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
						IDMAC_DES0_CH;

			/* Buffer length */
			sz_per_desc = min(left, rw_size);
#ifdef CONFIG_MMC_DW_64_IDMAC /*64bit*/
			desc->des1 = 0;
			desc->des2 = length;
			IDMAC_SET_BUFFER1_SIZE(desc, sz_per_desc);
			/* Physical address to DMA to/from */
			desc->des4 = (u32)(mem_addr);
			desc->des5 = (u32)(mem_addr >> 32);
			desc->des7 = 0;
#else /*32bit*/
			desc->des1 = length;
			IDMAC_SET_BUFFER1_SIZE(desc, sz_per_desc);
			/* Physical address to DMA to/from */
			desc->des2 = mem_addr;
#endif

#if defined(CONFIG_MMC_DW_FMP_DM_CRYPT) || defined(CONFIG_MMC_DW_FMP_ECRYPT_FS)
#ifdef CONFIG_MMC_DW_64_IDMAC /*64bit*/
			if (sector_key == DW_MMC_BYPASS_SECTOR_BEGIN) {
				/* Plain-text sector: no disk or file algorithm. */
				IDMAC_SET_DAS(desc, CLEAR);
				IDMAC_SET_FAS(desc, CLEAR);
			} else {
				if ((sector_key & DW_MMC_ENCRYPTION_SECTOR_BEGIN) &&
					(host->pdata->quirks & DW_MCI_QUIRK_USE_SMU)) { /* disk encryption */
					/* disk algorithm selector */
					IDMAC_SET_DAS(desc, AES_XTS);
					desc->des2 |= IDMAC_DES2_DKL;

					/* Disk IV */
					desc->des28 = 0;
					desc->des29 = 0;
					desc->des30 = 0;
					desc->des31 = htonl(sector);

					/* Disk Enc Key, Tweak Key */
					if (disk_key_flag) {
						int ret;

						/* Disk Enc Key, Tweak Key*/
						ret = exynos_smc(SMC_CMD_FMP, FMP_KEY_SET, EMMC0_FMP, 0);
						if (ret) {
							/* Key install failed: abort the whole request. */
							dev_err(host->dev, "Failed to smc call for FMP key setting: %x\n", ret);
							spin_lock(&host->lock);
							host->mrq_cmd->cmd->error = -ENOKEY;
							dw_mci_request_end(host, host->mrq_cmd, &host->state_dat);
							host->state_cmd = STATE_IDLE;
							spin_unlock(&host->lock);
						}
						spin_lock(&disk_key_lock);
						disk_key_flag = 0;
						spin_unlock(&disk_key_lock);
					}
				}
#if defined(CONFIG_MMC_DW_FMP_ECRYPT_FS)
				if ((sector_key & DW_MMC_FILE_ENCRYPTION_SECTOR_BEGIN) &&
					(host->pdata->quirks & DW_MCI_QUIRK_USE_SMU)) {
					unsigned int aes_alg = 0;
					unsigned int j;
					int ret;
					loff_t index;

					/* File algorithm selector*/
					if (!strncmp(sg_page(&data->sg[i])->mapping->alg, "aes", sizeof("aes")))
						aes_alg = AES_CBC;
					else if (!strncmp(sg_page(&data->sg[i])->mapping->alg, "aesxts", sizeof("aesxts")))
						aes_alg = AES_XTS;
					else {
						dev_err(host->dev, "Invalid file algorithm: %s\n", sg_page(&data->sg[i])->mapping->alg);
						spin_lock(&host->lock);
						host->mrq_cmd->cmd->error = -EBADR;
						dw_mci_request_end(host, host->mrq_cmd, &host->state_dat);
						host->state_cmd = STATE_IDLE;
						spin_unlock(&host->lock);
					}

					IDMAC_SET_FAS(desc, aes_alg);

					/* File enc key size */
					switch (sg_page(&data->sg[i])->mapping->key_length) {
					case 16:
						desc->des2 &= ~IDMAC_DES2_FKL;
						break;
					case 32:
					case 64:
						desc->des2 |= IDMAC_DES2_FKL;
						break;
					default:
						dev_err(host->dev, "Invalid file key length: %lx\n", sg_page(&data->sg[i])->mapping->key_length);
						spin_lock(&host->lock);
						host->mrq_cmd->cmd->error = -EBADR;
						dw_mci_request_end(host, host->mrq_cmd, &host->state_dat);
						host->state_cmd = STATE_IDLE;
						spin_unlock(&host->lock);
					}

					/*
					 * Derive the per-extent IV only when the page index
					 * or owning inode changed since the last descriptor.
					 */
					index = sg_page(&data->sg[i])->index;
					if ((last_index != index) || (last_inode != sg_page(&data->sg[i])->mapping->host->i_ino)) {
						index = index - sg_page(&data->sg[i])->mapping->sensitive_data_index;
						ret = file_enc_derive_iv(sg_page(&data->sg[i])->mapping, index, extent_iv);
						if (ret) {
							dev_err(host->dev, "Error attemping to derive IV\n");
							spin_lock(&host->lock);
							host->mrq_cmd->cmd->error = -EBADR;
							dw_mci_request_end(host, host->mrq_cmd, &host->state_dat);
							host->state_cmd = STATE_IDLE;
							spin_unlock(&host->lock);
						}
					}
					last_index = sg_page(&data->sg[i])->index;
					last_inode = sg_page(&data->sg[i])->mapping->host->i_ino;

					/* File IV */
					desc->des8 = word_in(extent_iv, 3);
					desc->des9 = word_in(extent_iv, 2);
					desc->des10 = word_in(extent_iv, 1);
					desc->des11 = word_in(extent_iv, 0);

					/* File Enc key */
					for (j = 0; j < sg_page(&data->sg[i])->mapping->key_length >> 2; j++)
						*(&(desc->des12) + j) =
							word_in(sg_page(&data->sg[i])->mapping->key, (sg_page(&data->sg[i])->mapping->key_length >> 2) - (j + 1));
				}
#endif
			}
#else /*32bit*/
			if ((sector_key == DW_MMC_ENCRYPTION_SECTOR_BEGIN) &&
				(host->pdata->quirks & DW_MCI_QUIRK_USE_SMU)) {
				desc->des4 = sector;
				desc->des5 = 0;
				desc->des6 = 0;
				desc->des7 = 0;
			} else
				desc->des4 = DW_MMC_BYPASS_SECTOR_BEGIN;
#endif
			sector += rw_size / DW_MMC_SECTOR_SIZE;
#else
#ifdef CONFIG_MMC_DW_64_IDMAC /*64bit*/
			//desc->des8 = DW_MMC_BYPASS_SECTOR_BEGIN;
#else /*32bit*/
			desc->des4 = DW_MMC_BYPASS_SECTOR_BEGIN;
#endif
#endif
			desc++;
			desc_cnt++;
			mem_addr += sz_per_desc;
			left -= sz_per_desc;
		}
	}

	/* Set first descriptor */
	desc = host->sg_cpu;
	desc->des0 |= IDMAC_DES0_FD;

	/* Set last descriptor (sg_cpu is void *, so the offset is in bytes) */
	desc = host->sg_cpu + (desc_cnt - 1) * sizeof(struct idmac_desc);
	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
	desc->des0 |= IDMAC_DES0_LD;

	/* Descriptors must be visible to the IDMAC before it is started. */
	wmb();
}
1294
/*
 * Build the descriptor chain and kick the internal DMA controller.
 *
 * After translating the scatterlist, applies the FMP size-mismatch
 * workaround for tiny single-block transfers (AXI burst length and, for
 * 64-byte blocks, the EMMCP sector size), selects the IDMAC interface,
 * enables it in BMOD and writes the poll-demand register to start.
 */
static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
	u32 temp;

	dw_mci_translate_sglist(host, host->data, sg_len);

	if (host->data->blocks == 1 &&
		host->quirks & DW_MMC_QUIRK_FMP_SIZE_MISMATCH) {
		unsigned int blksz = host->data->blksz;

		if (blksz == 8) {
			/* Use AXI single burst when sending 8 bytes */
			mci_writel(host, AXI_BURST_LEN,
				DWMCI_BURST_LENGTH_CTRL(0));
		} else if (blksz == 64) {
			/*
			 * Use AXI 4 burst and set sector size for EMMCP
			 * with real block size when sending 64 bytes
			 */
			u32 sector_size;

			mci_writel(host, AXI_BURST_LEN,
				DWMCI_BURST_LENGTH_CTRL(0x3));
			sector_size = mci_readl(host, SECTOR_NUM_INC);
			sector_size &= ~(DWMCI_SECTOR_SIZE_MASK);
			sector_size |= DWMCI_SECTOR_SIZE_CTRL(blksz);
			mci_writel(host, SECTOR_NUM_INC, sector_size);
		}
	}

	/* Select IDMAC interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_USE_IDMAC;
	mci_writel(host, CTRL, temp);

	/* Make sure descriptor writes land before enabling the IDMAC. */
	wmb();

	/* Enable the IDMAC */
	temp = mci_readl(host, BMOD);
	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
	mci_writel(host, BMOD, temp);

	/* Start it running */
	mci_writel(host, PLDMND, 1);
}
1340
/*
 * One-time IDMAC setup: forward-link the descriptor ring, mark its end,
 * soft-reset the IDMAC, unmask only the TX/RX-complete interrupts and
 * program the descriptor base address.  Also derives the host's minimum
 * DMA buffer alignment.  Always returns 0.
 */
static int dw_mci_idmac_init(struct dw_mci *host)
{
	struct idmac_desc *p;
	int i;
	dma_addr_t addr;

	/* Number of descriptors in the ring buffer */
	host->ring_size = host->desc_sz * PAGE_SIZE / sizeof(struct idmac_desc);

	/* Forward link the descriptor list */
	for (i = 0, p = host->sg_cpu; i < host->ring_size *
		MMC_DW_IDMAC_MULTIPLIER - 1; i++, p++) {
		addr = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
		IDMAC_SET_DESC_ADDR(p, addr);
	}

	/* Set the last descriptor as the end-of-ring descriptor */
	IDMAC_SET_DESC_ADDR(p, host->sg_dma);
	p->des0 = IDMAC_DES0_ER;

	mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET);

	/* Mask out interrupts - get Tx & Rx complete only */
	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
		   SDMMC_IDMAC_INT_TI);

#ifdef CONFIG_MMC_DW_64_IDMAC
	/* Set the descriptor base address */
	mci_writel(host, DBADDRL, host->sg_dma);
#else
	mci_writel(host, DBADDR, host->sg_dma);
#endif
	/*
	 * Buffer alignment: 32 bytes when single-beat DMA is not allowed,
	 * otherwise the FIFO word width (8 for 64-bit data, 4 for 32-bit).
	 */
	if (host->quirks & DW_MMC_QUIRK_NOT_ALLOW_SINGLE_DMA)
		host->align_size = 32;
	else
		host->align_size = (host->data_shift == 3) ? 8 : 4;

	return 0;
}
1380
/* DMA operations implemented by the internal DMA controller (IDMAC). */
static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
	.init = dw_mci_idmac_init,
	.start = dw_mci_idmac_start_dma,
	.stop = dw_mci_idmac_stop_dma,
	.reset = dw_mci_idma_reset_dma,
	.complete = dw_mci_idmac_complete_dma,
	.cleanup = dw_mci_dma_cleanup,
};
1389#endif /* CONFIG_MMC_DW_IDMAC */
1390
9aa51408
SJ
1391static int dw_mci_pre_dma_transfer(struct dw_mci *host,
1392 struct mmc_data *data,
1393 bool next)
f95f3850
WN
1394{
1395 struct scatterlist *sg;
3c2a0909
S
1396 struct dw_mci_slot *slot = host->cur_slot;
1397 struct mmc_card *card = slot->mmc->card;
1398 struct mmc_host *mmc = slot->mmc;
9aa51408 1399 unsigned int i, sg_len;
3c2a0909
S
1400 unsigned int align_mask = host->align_size - 1;
1401#ifdef CONFIG_MMC_DW_SKIP_CACHE_OP
1402 struct dma_attrs *attrs;
1403#endif
03e8cb53 1404
9aa51408
SJ
1405 if (!next && data->host_cookie)
1406 return data->host_cookie;
f95f3850
WN
1407
1408 /*
1409 * We don't do DMA on "complex" transfers, i.e. with
1410 * non-word-aligned buffers or lengths. Also, we don't bother
1411 * with all the DMA setup overhead for short transfers.
1412 */
1413 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
1414 return -EINVAL;
9aa51408 1415
3c2a0909
S
1416 if (data->blksz & align_mask)
1417 return -EINVAL;
1418
1419 if (data->blksz < (1 << host->data_shift))
f95f3850
WN
1420 return -EINVAL;
1421
3c2a0909
S
1422 if (host->quirks & DW_MMC_QUIRK_USE_CPU_MODE_TUNING) {
1423 if (mmc->tuning_progress)
1424 return -EINVAL;
1425 }
1426
1427 if (card && mmc_card_sdio(card)) {
1428 unsigned int rxwmark_val = 0, txwmark_val = 0, msize_val = 0;
1429
1430 if (data->blksz >= (4 * (1 << host->data_shift))) {
1431 msize_val = 1;
1432 rxwmark_val = 3;
1433 txwmark_val = 4;
1434 } else {
1435 msize_val = 0;
1436 rxwmark_val = 1;
1437 txwmark_val = host->fifo_depth / 2;
1438 }
1439
1440 host->fifoth_val = ((msize_val << 28) | (rxwmark_val << 16) |
1441 (txwmark_val << 0));
1442 dev_dbg(host->dev,
1443 "data->blksz: %d data->blocks %d Transfer Size %d "
1444 "msize_val : %d, rxwmark_val : %d host->fifoth_val: 0x%08x\n",
1445 data->blksz, data->blocks, (data->blksz * data->blocks),
1446 msize_val, rxwmark_val, host->fifoth_val);
1447
1448 mci_writel(host, FIFOTH, host->fifoth_val);
1449
1450 if (mmc_card_uhs(card)
1451 && card->host->caps & MMC_CAP_UHS_SDR104
1452 && data->flags & MMC_DATA_READ)
1453 mci_writel(host, CDTHRCTL, data->blksz << 16 | 1);
1454 }
1455
1456 if (data->blocks == 1 &&
1457 host->quirks & DW_MMC_QUIRK_FMP_SIZE_MISMATCH) {
1458 if (data->blksz == 64) {
1459 host->fifoth_val = ((2 << 28) | (7 << 16) |
1460 ((host->fifo_depth - 8)<< 0));
1461 mci_writel(host, FIFOTH, host->fifoth_val);
1462 }
1463 }
1464
f95f3850 1465 for_each_sg(data->sg, sg, data->sg_len, i) {
3c2a0909 1466 if (sg->offset & align_mask || sg->length & align_mask)
f95f3850
WN
1467 return -EINVAL;
1468 }
1469
3c2a0909
S
1470#ifdef CONFIG_MMC_DW_SKIP_CACHE_OP
1471 attrs = (data->flags & MMC_DATA_DIRECT) ? &dw_mci_direct_attrs : NULL;
1472 sg_len = dma_map_sg_attr(host->dev,
1473 data->sg,
1474 data->sg_len,
1475 dw_mci_get_dma_dir(data), attrs);
1476#else
4a90920c 1477 sg_len = dma_map_sg(host->dev,
9aa51408
SJ
1478 data->sg,
1479 data->sg_len,
1480 dw_mci_get_dma_dir(data));
3c2a0909 1481#endif
9aa51408
SJ
1482 if (sg_len == 0)
1483 return -EINVAL;
03e8cb53 1484
9aa51408
SJ
1485 if (next)
1486 data->host_cookie = sg_len;
f95f3850 1487
9aa51408
SJ
1488 return sg_len;
1489}
1490
9aa51408
SJ
1491static void dw_mci_pre_req(struct mmc_host *mmc,
1492 struct mmc_request *mrq,
1493 bool is_first_req)
1494{
1495 struct dw_mci_slot *slot = mmc_priv(mmc);
1496 struct mmc_data *data = mrq->data;
1497
1498 if (!slot->host->use_dma || !data)
1499 return;
1500
1501 if (data->host_cookie) {
1502 data->host_cookie = 0;
1503 return;
1504 }
1505
1506 if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
1507 data->host_cookie = 0;
1508}
1509
1510static void dw_mci_post_req(struct mmc_host *mmc,
1511 struct mmc_request *mrq,
1512 int err)
1513{
1514 struct dw_mci_slot *slot = mmc_priv(mmc);
1515 struct mmc_data *data = mrq->data;
1516
1517 if (!slot->host->use_dma || !data)
1518 return;
1519
1520 if (data->host_cookie)
4a90920c 1521 dma_unmap_sg(slot->host->dev,
9aa51408
SJ
1522 data->sg,
1523 data->sg_len,
1524 dw_mci_get_dma_dir(data));
1525 data->host_cookie = 0;
1526}
1527
/*
 * Try to start @data as a DMA transfer.
 *
 * Re-initialises and resets the DMA engine, maps the scatterlist, then
 * enables the controller's DMA interface and masks the PIO (FIFO
 * watermark) interrupts.  Returns 0 on success or a negative error so
 * the caller falls back to PIO.
 */
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
	int sg_len;
	u32 temp;

	host->using_dma = 0;

	/* If we don't have a channel, we can't do DMA */
	if (!host->use_dma)
		return -ENODEV;

	/* Re-init and reset the IDMAC before each transfer. */
	if (host->use_dma && host->dma_ops->init && host->dma_ops->reset) {
		host->dma_ops->init(host);
		host->dma_ops->reset(host);
	}

	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
	if (sg_len < 0) {
		host->dma_ops->stop(host);
		return sg_len;
	}

	host->using_dma = 1;

	dev_vdbg(host->dev,
		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
		 sg_len);

	/* Enable the DMA interface */
	temp = mci_readl(host, CTRL);
	temp |= SDMMC_CTRL_DMA_ENABLE;
	mci_writel(host, CTRL, temp);

	/* Disable RX/TX IRQs, let DMA handle it */
	mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
	temp = mci_readl(host, INTMASK);
	temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
	mci_writel(host, INTMASK, temp);

	host->dma_ops->start(host, sg_len);

	return 0;
}
1572
/*
 * Prepare @data for transfer: try DMA first, and on failure fall back
 * to PIO — flush the FIFO if stale words remain, start an sg_miter
 * walk, unmask the FIFO watermark interrupts and switch the DMA
 * interface off.
 */
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
	u32 temp;

	data->error = -EINPROGRESS;

	WARN_ON(host->data);
	host->sg = NULL;
	host->data = data;

	if (data->flags & MMC_DATA_READ)
		host->dir_status = DW_MCI_RECV_STATUS;
	else
		host->dir_status = DW_MCI_SEND_STATUS;

	if (dw_mci_submit_data_dma(host, data)) {
		int flags = SG_MITER_ATOMIC;

		/* Stale words left in the FIFO would corrupt PIO; reset it. */
		if (SDMMC_GET_FCNT(mci_readl(host, STATUS)))
			dw_mci_wait_reset(host->dev, host, SDMMC_CTRL_FIFO_RESET);

		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->sg = data->sg;
		host->part_buf_start = 0;
		host->part_buf_count = 0;

		mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
		temp = mci_readl(host, INTMASK);
		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
		mci_writel(host, INTMASK, temp);

		temp = mci_readl(host, CTRL);
		temp &= ~SDMMC_CTRL_DMA_ENABLE;
		mci_writel(host, CTRL, temp);
	}
}
1614
/*
 * Wait for the card to release DAT[0] (busy) before issuing a command.
 *
 * Polls STATUS for up to DW_MCI_BUSY_WAIT_TIMEOUT ms.  On timeout the
 * controller is reset, low-power clock gating is disabled and one more
 * attempt is made.  CMD13 (SEND_STATUS) is exempted since it is the
 * periodic card poll itself.  Returns true when the line went idle,
 * false when the card stayed busy through both attempts.
 */
static bool dw_mci_wait_data_busy(struct dw_mci *host, struct mmc_request *mrq)
{
	u32 status;
	unsigned long timeout = jiffies + msecs_to_jiffies(DW_MCI_BUSY_WAIT_TIMEOUT);
	struct dw_mci_slot *slot = host->cur_slot;
	int try = 2;
	u32 clkena;
	bool ret = false;

	do {
		do {
			status = mci_readl(host, STATUS);
			if (!(status & SDMMC_DATA_BUSY)) {
				ret = true;
				goto out;
			}

			usleep_range(10, 20);
		} while (time_before(jiffies, timeout));

		/* card is checked every 1s by CMD13 at least */
		if (mrq->cmd->opcode == MMC_SEND_STATUS)
			return true;

		dw_mci_wait_reset(host->dev, host, SDMMC_CTRL_RESET);
		/* After CTRL Reset, Should be needed clk val to CIU */
		if (host->cur_slot) {
			/* Disable low power mode */
			clkena = mci_readl(host, CLKENA);
			clkena &= ~((SDMMC_CLKEN_LOW_PWR) << slot->id);
			mci_writel(host, CLKENA, clkena);

			dw_mci_update_clock(host->cur_slot);
		}
		timeout = jiffies + msecs_to_jiffies(DW_MCI_BUSY_WAIT_TIMEOUT);
	} while (--try);
out:
	if (host->cur_slot) {
		if (ret == false)
			dev_err(host->dev, "Data[0]: data is busy\n");

		/* enable clock */
		mci_writel(host, CLKENA, ((SDMMC_CLKEN_ENABLE |
			SDMMC_CLKEN_LOW_PWR) << slot->id));

		/* inform CIU */
		dw_mci_update_clock(slot);
	}

	return ret;
}
1666
/*
 * Program the card clock divider and bus width for @slot.
 *
 * Recomputes CLKDIV from bus_hz and the requested rate.  If the result
 * would over-clock the card, the source clock is dropped to the legacy
 * timing rate via the variant set_ios and the division retried once.
 * The clock is gated around the CLKDIV update and every change is
 * forwarded to the CIU with dw_mci_update_clock().  @force_clkinit
 * forces the sequence even when the rate is unchanged.
 */
static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
{
	struct dw_mci *host = slot->host;
	u32 div, actual_speed;
	bool reset_div = false;
	u32 clk_en_a;

	if (slot->clock && ((slot->clock != host->current_speed) || force_clkinit)) {
		do {
			div = host->bus_hz / slot->clock;
			if ((host->bus_hz % slot->clock) &&
				(host->bus_hz > slot->clock))
				/*
				 * move the + 1 after the divide to prevent
				 * over-clocking the card.
				 */
				div++;

			div = (host->bus_hz != slot->clock) ?
				DIV_ROUND_UP(div, 2) : 0;

			/* CLKDIV limitation is 0xFF */
			if (div > 0xFF)
				div = 0xFF;

			actual_speed = div ?
				(host->bus_hz / div) >> 1 : host->bus_hz;

			/* Change SCLK_MMC */
			if (actual_speed > slot->clock &&
				host->bus_hz != 0 && !reset_div) {
				dev_err(host->dev,
					"Actual clock is high than a reqeust clock."
					"Source clock is needed to change\n");
				reset_div = true;
				slot->mmc->ios.timing = MMC_TIMING_LEGACY;
				host->drv_data->set_ios(host, 0, &slot->mmc->ios);
			} else
				reset_div = false;
		} while (reset_div);

		dev_info(&slot->mmc->class_dev,
			 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ"
			 " div = %d)\n", slot->id, host->bus_hz, slot->clock,
			 div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div);

		/* disable clock */
		mci_writel(host, CLKENA, 0);
		mci_writel(host, CLKSRC, 0);

		/* inform CIU */
		dw_mci_update_clock(slot);

		/* set clock to desired speed */
		mci_writel(host, CLKDIV, div);

		/* inform CIU */
		dw_mci_update_clock(slot);

		/* enable clock; only low power if no SDIO */
		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
		if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
		mci_writel(host, CLKENA, clk_en_a);

		/* inform CIU */
		dw_mci_update_clock(slot);

		host->current_speed = slot->clock;
	}

	/* Set the current slot bus width */
	mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
1741
3c2a0909
S
1742inline u32 dw_mci_calc_timeout(struct dw_mci *host)
1743{
1744 u32 target_timeout;
1745 u32 count;
1746 u32 host_clock = host->bus_hz;
1747
1748 if (!host->pdata->data_timeout)
1749 return 0xFFFFFFFF; /* timeout maximum */
1750
1751 target_timeout = host->pdata->data_timeout;
1752
1753 /* Calculating Timeout value */
1754 count = (target_timeout * (host_clock / 1000)) /
1755 (SDMMC_DATA_TMOUT_CRT * SDMMC_DATA_TMOUT_EXT);
1756
1757 if (count > 0x1FFFFF)
1758 count = 0x1FFFFF;
1759
1760 /* 'count' of DDR200 ULP is equal to that of DDR200
1761 But 'bus_hz' is half(200mhz) of DDR200(400mhz)
1762 So need to double 'count' for DDR200 ULP. This
1763 also satisfies the requirement that 'count' should
1764 be even for DDR200 ULP */
1765 if (host->pdata->quirks & DW_MCI_QUIRK_ENABLE_ULP)
1766 count *= 2;
1767
1768 /* Set return value */
1769 return ((count << SDMMC_DATA_TMOUT_SHIFT)
1770 | (SDMMC_DATA_TMOUT_EXT << SDMMC_DATA_TMOUT_EXT_SHIFT)
1771 | SDMMC_RESP_TMOUT);
1772}
1773
053b3ce6
SJ
/*
 * Issue @cmd for @slot: arm the software timeout timer, reset per-request
 * host state, program the data registers when a data stage exists, then
 * start the command and precompute the stop command if one will be needed.
 * Caller holds the appropriate locking for host state.
 */
static void __dw_mci_start_request(struct dw_mci *host,
				   struct dw_mci_slot *slot,
				   struct mmc_command *cmd)
{
	struct mmc_request *mrq;
	struct mmc_data *data;
	u32 cmdflags;

	mrq = slot->mrq;
	if (host->pdata->select_slot)
		host->pdata->select_slot(slot->id);

	/* Software watchdog: platform value, 1s for tuning, else 10s. */
	if (host->pdata->sw_timeout)
		mod_timer(&host->timer,
			jiffies + msecs_to_jiffies(host->pdata->sw_timeout));
	else if (mrq->cmd->opcode == MMC_SEND_TUNING_BLOCK ||
		mrq->cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		mod_timer(&host->timer, jiffies + msecs_to_jiffies(1000));
	else
		mod_timer(&host->timer, jiffies + msecs_to_jiffies(10000));

	host->cur_slot = slot;
	host->mrq_cmd = mrq;

	host->cmd_status = 0;
	host->completed_events = 0;
	if (!MMC_CHECK_CMDQ_MODE(host))
		host->data_status = 0;

	host->mrq_dat = mrq;

	host->stop_cmdr = 0;
	host->stop_snd = false;

	/*
	 * NOTE(review): these unconditional clears repeat — and override —
	 * the CMDQ-conditional clears of data_status/pending_events above
	 * and below; confirm whether CMDQ mode really intends both.
	 */
	host->data_status = 0;
	host->dir_status = 0;
	host->pending_events = 0;

	if (MMC_CHECK_CMDQ_MODE(host))
		clear_bit(EVENT_CMD_COMPLETE, &host->pending_events);
	else
		host->pending_events = 0;

	if (host->pdata->tp_mon_tbl)
		host->cmd_cnt++;

	data = cmd->data;
	if (data) {
		/* Program timeout, byte count and block size for the data stage. */
		dw_mci_set_timeout(host, dw_mci_calc_timeout(host));
		mci_writel(host, BYTCNT, data->blksz*data->blocks);
		mci_writel(host, BLKSIZ, data->blksz);
		if (host->pdata->tp_mon_tbl)
			host->transferred_cnt += data->blksz * data->blocks;
	}

	cmdflags = dw_mci_prepare_command(slot->mmc, cmd);

	/* this is the first command, send the initialization clock */
	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
		cmdflags |= SDMMC_CMD_INIT;

	if (data) {
		dw_mci_submit_data(host, data);
		wmb();
	}

	dw_mci_debug_req_log(host, mrq, STATE_REQ_START, 0);

	dw_mci_start_command(host, cmd, cmdflags);

	/* Precompute the stop command: explicit mrq->stop, or an auto-stop. */
	if (mrq->stop)
		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
	else {
		if (data)
			host->stop_cmdr = dw_mci_prep_stop(host, cmd);
	}
}
1851
053b3ce6
SJ
1852static void dw_mci_start_request(struct dw_mci *host,
1853 struct dw_mci_slot *slot)
1854{
1855 struct mmc_request *mrq = slot->mrq;
1856 struct mmc_command *cmd;
1857
3c2a0909
S
1858 host->req_state = DW_MMC_REQ_BUSY;
1859
1860 if (mrq->cmd->data &&
1861 (mrq->cmd->error || mrq->cmd->data->error))
1862 cmd = mrq->stop;
1863 else
1864 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
053b3ce6
SJ
1865 __dw_mci_start_request(host, slot, cmd);
1866}
1867
7456caae 1868/* must be called with host->lock held */
f95f3850
WN
1869static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1870 struct mmc_request *mrq)
1871{
3c2a0909
S
1872 bool in_tasklet = (host->tasklet_state == 0)
1873 || !(host->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION);
f95f3850 1874
3c2a0909
S
1875 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1876 host->state_cmd);
f95f3850 1877
3c2a0909
S
1878 if (host->state_cmd == STATE_IDLE && in_tasklet) {
1879 slot->mrq = mrq;
1880 host->state_cmd = STATE_SENDING_CMD;
f95f3850
WN
1881 dw_mci_start_request(host, slot);
1882 } else {
3c2a0909
S
1883 if (host->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
1884 list_add_tail(&mrq->hlist, &slot->mrq_list);
f95f3850 1885 list_add_tail(&slot->queue_node, &host->queue);
3c2a0909 1886 dev_info(host->dev, "QUEUED!! (%d, %d )", host->state_cmd, host->state_dat);
f95f3850 1887 }
f95f3850
WN
1888}
1889
1890static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1891{
1892 struct dw_mci_slot *slot = mmc_priv(mmc);
1893 struct dw_mci *host = slot->host;
1894
f95f3850 1895 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
3c2a0909
S
1896 if (!mmc->card) {
1897 mrq->cmd->error = -ENOMEDIUM;
1898 mmc_request_done(mmc, mrq);
1899 return;
1900 }
1901 }
1902
1903 if (!MMC_CHECK_CMDQ_MODE(host)) {
1904 if (!dw_mci_stop_abort_cmd(mrq->cmd)) {
1905 if (!dw_mci_wait_data_busy(host, mrq)) {
1906 mrq->cmd->error = -ENOTRECOVERABLE;
1907 mmc_request_done(mmc, mrq);
1908 return;
1909 }
1910 }
f95f3850
WN
1911 }
1912
3c2a0909
S
1913 spin_lock_bh(&host->lock);
1914
f95f3850 1915 dw_mci_queue_request(host, slot, mrq);
7456caae
JH
1916
1917 spin_unlock_bh(&host->lock);
f95f3850
WN
1918}
1919
/*
 * mmc_host_ops.set_ios: apply bus width, timing, clock and power-mode
 * changes.  Keeps the BIU/CIU gate-clock windows open around the
 * register updates, programs DDR/UHS mode bits, delegates variant
 * specifics to drv_data->set_ios(), reconfigures the bus via
 * dw_mci_setup_bus() and handles external power sequencing plus PM QoS
 * on power up/down.
 */
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 regs;
	bool cclk_request_turn_off = 0;

	/* Keep the bus-interface clock on while registers are touched. */
	if (slot->host->pdata->use_biu_gate_clock)
		atomic_inc_return(&slot->host->biu_en_win);
	dw_mci_biu_clk_en(slot->host, false);

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_4:
		slot->ctype = SDMMC_CTYPE_4BIT;
		break;
	case MMC_BUS_WIDTH_8:
		slot->ctype = SDMMC_CTYPE_8BIT;
		break;
	default:
		/* set default 1 bit mode */
		slot->ctype = SDMMC_CTYPE_1BIT;
	}

	regs = mci_readl(slot->host, UHS_REG);

	/* DDR mode bit — skipped during tuning. */
	if (ios->timing == MMC_TIMING_UHS_DDR50 ||
		ios->timing == MMC_TIMING_MMC_HS200_DDR ||
		ios->timing == MMC_TIMING_MMC_HS200_DDR_ES) {
		if (!mmc->tuning_progress)
			regs |= ((SDMMC_UHS_DDR_MODE << slot->id) << 16);
	} else
		regs &= ~((SDMMC_UHS_DDR_MODE << slot->id) << 16);

	/* 1.8V signalling bit when any UHS capability is advertised. */
	if (slot->host->pdata->caps &
		(MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
		 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
		 MMC_CAP_UHS_DDR50))
		regs |= (0x1 << slot->id);

	mci_writel(slot->host, UHS_REG, regs);

	/* Card-read threshold for HS200 DDR timings. */
	if (ios->timing == MMC_TIMING_MMC_HS200_DDR ||
		ios->timing == MMC_TIMING_MMC_HS200_DDR_ES)
		if (!mmc->tuning_progress)
			mci_writel(slot->host, CDTHRCTL, 512 << 16 | 1);

	if (ios->clock) {
		/*
		 * Use mirror of ios->clock to prevent race with mmc
		 * core ios update when finding the minimum.
		 */
		slot->clock = ios->clock;
		pm_qos_update_request(&slot->host->pm_qos_int,
			slot->host->pdata->qos_int_level);
	} else {
		/* Clock off: drop QoS and gate the clocks at the end. */
		pm_qos_update_request(&slot->host->pm_qos_int, 0);
		cclk_request_turn_off = 1;
	}

	if (drv_data && drv_data->set_ios) {
		drv_data->set_ios(slot->host, mmc->tuning_progress, ios);

		/* Reset the min/max in case the set_ios() changed bus_hz */
		mmc->f_min = DIV_ROUND_UP(slot->host->bus_hz, 510);
		mmc->f_max = slot->host->bus_hz;
	}

	/*
	 * CIU clock should be enabled because dw_mci_setup_bus is called
	 * unconditionally in this function
	 */
	if (slot->host->pdata->use_gate_clock)
		atomic_inc_return(&slot->host->ciu_en_win);
	dw_mci_ciu_clk_en(slot->host, false);

	/* Slot specific timing and width adjustment */
	dw_mci_setup_bus(slot, false);
	if (slot->host->pdata->use_gate_clock)
		atomic_dec_return(&slot->host->ciu_en_win);
	if (slot->host->pdata->use_biu_gate_clock)
		atomic_dec_return(&slot->host->biu_en_win);

	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (slot->host->pdata->ext_setpower)
			slot->host->pdata->ext_setpower(slot->host,
				DW_MMC_EXT_VQMMC_ON | DW_MMC_EXT_VMMC_ON);

		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
		if (slot->host->pdata->tp_mon_tbl)
			schedule_delayed_work(&slot->host->tp_mon, HZ);
		break;
	case MMC_POWER_OFF:
		/*
		 * If BIU clock is turned off in this case,
		 * warning message would be shown when trying mutex lock.
		 */
		cclk_request_turn_off = 0;

		if (slot->host->pdata->tp_mon_tbl) {
			cancel_delayed_work_sync(&slot->host->tp_mon);
			pm_qos_update_request(&slot->host->pm_qos_mif, 0);
#if defined(CONFIG_ARM_EXYNOS_MP_CPUFREQ)
			pm_qos_update_request(&slot->host->pm_qos_cluster0, 0);
#elif defined(CONFIG_ARM_EXYNOS_SMP_CPUFREQ)
			pm_qos_update_request(&slot->host->pm_qos_cluster0, 0);
#else
			pm_qos_update_request(&slot->host->pm_qos_cluster1, 0);
#endif
		}

		if (slot->host->pdata->ext_setpower) {
			/* Keep VQMMC on; drop VMMC, then settle before cutting power. */
			slot->host->pdata->ext_setpower(slot->host, DW_MMC_EXT_VQMMC_ON);
			mdelay(5);
		}
		break;
	default:
		break;
	}

	if (cclk_request_turn_off) {
		dw_mci_ciu_clk_dis(slot->host);
		dw_mci_biu_clk_dis(slot->host);
	}
}
2045
2046static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2047{
2048 struct dw_mci_slot *slot = mmc_priv(mmc);
2049 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
2050 int ret = 0;
2051
2052 if (drv_data && drv_data->execute_tuning)
2053 ret = drv_data->execute_tuning(slot->host, opcode);
2054
2055 return ret;
f95f3850
WN
2056}
2057
2058static int dw_mci_get_ro(struct mmc_host *mmc)
2059{
2060 int read_only;
2061 struct dw_mci_slot *slot = mmc_priv(mmc);
2062 struct dw_mci_board *brd = slot->host->pdata;
2063
2064 /* Use platform get_ro function, else try on board write protect */
9640639b 2065 if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT)
b4967aa5
TA
2066 read_only = 0;
2067 else if (brd->get_ro)
f95f3850 2068 read_only = brd->get_ro(slot->id);
55a6ceb2
DA
2069 else if (gpio_is_valid(slot->wp_gpio))
2070 read_only = gpio_get_value(slot->wp_gpio);
f95f3850
WN
2071 else
2072 read_only =
2073 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
2074
2075 dev_dbg(&mmc->class_dev, "card is %s\n",
2076 read_only ? "read-only" : "read-write");
2077
2078 return read_only;
2079}
2080
2081static int dw_mci_get_cd(struct mmc_host *mmc)
2082{
2083 int present;
3c2a0909 2084 int temp;
f95f3850 2085 struct dw_mci_slot *slot = mmc_priv(mmc);
3c2a0909
S
2086 struct dw_mci *host = slot->host;
2087 struct dw_mci_board *brd = host->pdata;
2088 const struct dw_mci_drv_data *drv_data = host->drv_data;
f95f3850
WN
2089
2090 /* Use platform get_cd function, else try onboard card detect */
fc3d7720
JC
2091 if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
2092 present = 1;
2093 else if (brd->get_cd)
f95f3850
WN
2094 present = !brd->get_cd(slot->id);
2095 else
3c2a0909 2096 present = (mci_readl(host, CDETECT) & (1 << slot->id))
f95f3850
WN
2097 == 0 ? 1 : 0;
2098
3c2a0909
S
2099 if (drv_data && drv_data->misc_control) {
2100 temp = drv_data->misc_control(host,
2101 CTRL_CHECK_CD, NULL);
2102 if (temp != -1)
2103 present = temp;
2104 }
2105
f95f3850
WN
2106 if (present)
2107 dev_dbg(&mmc->class_dev, "card is present\n");
2108 else
2109 dev_dbg(&mmc->class_dev, "card is not present\n");
2110
2111 return present;
2112}
2113
9623b5b9
DA
2114/*
2115 * Disable lower power mode.
2116 *
2117 * Low power mode will stop the card clock when idle. According to the
2118 * description of the CLKENA register we should disable low power mode
2119 * for SDIO cards if we need SDIO interrupts to work.
2120 *
2121 * This function is fast if low power mode is already disabled.
2122 */
2123static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
2124{
2125 struct dw_mci *host = slot->host;
2126 u32 clk_en_a;
2127 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
2128
2129 clk_en_a = mci_readl(host, CLKENA);
2130
2131 if (clk_en_a & clken_low_pwr) {
2132 mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
3c2a0909 2133 dw_mci_update_clock(slot);
9623b5b9
DA
2134 }
2135}
2136
1a5c8e1f
SH
2137static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
2138{
2139 struct dw_mci_slot *slot = mmc_priv(mmc);
2140 struct dw_mci *host = slot->host;
2141 u32 int_mask;
2142
2143 /* Enable/disable Slot Specific SDIO interrupt */
2144 int_mask = mci_readl(host, INTMASK);
2145 if (enb) {
9623b5b9
DA
2146 /*
2147 * Turn off low power mode if it was enabled. This is a bit of
2148 * a heavy operation and we disable / enable IRQs a lot, so
2149 * we'll leave low power mode disabled and it will get
2150 * re-enabled again in dw_mci_setup_bus().
2151 */
2152 dw_mci_disable_low_power(slot);
2153
1a5c8e1f 2154 mci_writel(host, INTMASK,
705ad047 2155 (int_mask | SDMMC_INT_SDIO(slot->id)));
1a5c8e1f
SH
2156 } else {
2157 mci_writel(host, INTMASK,
705ad047 2158 (int_mask & ~SDMMC_INT_SDIO(slot->id)));
1a5c8e1f
SH
2159 }
2160}
2161
3c2a0909
S
/*
 * Switch the slot's I/O signalling to the high-voltage range.
 *
 * With an external vqmmc regulator the rail is reprogrammed; otherwise
 * the per-slot 1.8V bit in UHS_REG is cleared.  Returns 0 on success,
 * -EIO if the regulator refuses the voltage.
 *
 * NOTE(review): the regulator path requests 2.8V while the warning text
 * says 3.3V — presumably a board-specific I/O level; confirm against
 * the hardware design before changing either.
 */
static int dw_mci_3_3v_signal_voltage_switch(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	u32 reg;
	int ret = 0;

	if (host->vqmmc) {
		ret = regulator_set_voltage(host->vqmmc, 2800000, 2800000);
		if (ret) {
			dev_warn(host->dev, "Switching to 3.3V signalling "
					"voltage failed\n");
			return -EIO;
		}
	} else {
		/* No regulator: clear this slot's 1.8V bit in UHS_REG */
		reg = mci_readl(slot->host, UHS_REG);
		reg &= ~(0x1 << slot->id);
		mci_writel(slot->host, UHS_REG, reg);
	}

	/* Wait for 5ms */
	usleep_range(5000, 5500);

	return ret;
}
2186
/*
 * Switch the slot's I/O signalling to 1.8V (UHS modes).
 *
 * Sequence: reset the controller and wait (up to 10 retries of 10ms,
 * resetting between attempts) for the DATA line to go idle; gate the
 * card clock off; flip the voltage (regulator or UHS_REG bit); wait
 * 5ms for the rail to settle; re-enable the card clock.  The
 * biu_en_win/ciu_en_win counters keep the gated clocks from being
 * turned off underneath us while this runs.
 *
 * Returns 0 on success, -EIO if the regulator refuses 1.8V.
 */
static int dw_mci_1_8v_signal_voltage_switch(struct dw_mci_slot *slot)
{
	struct dw_mci *host = slot->host;
	unsigned long timeout = jiffies + msecs_to_jiffies(10);
	u32 reg;
	int ret = 0, retry = 10;
	u32 status;

	if (host->pdata->use_biu_gate_clock)
		atomic_inc_return(&host->biu_en_win);
	dw_mci_biu_clk_en(host, false);

	dw_mci_wait_reset(host->dev, host, SDMMC_CTRL_RESET);

	/* Check For DATA busy */
	do {

		while (time_before(jiffies, timeout)) {
			status = mci_readl(host, STATUS);
			if (!(status & SDMMC_DATA_BUSY))
				goto out;
		}

		/* Still busy: reset the controller and try another window */
		dw_mci_wait_reset(host->dev, host, SDMMC_CTRL_RESET);
		timeout = jiffies + msecs_to_jiffies(10);
	} while (--retry);

out:
	if (host->pdata->use_gate_clock)
		atomic_inc_return(&host->ciu_en_win);
	dw_mci_ciu_clk_en(host, false);
	/* Stop the card clock before changing the signalling voltage */
	reg = mci_readl(host, CLKENA);
	reg &= ~((SDMMC_CLKEN_LOW_PWR | SDMMC_CLKEN_ENABLE) << slot->id);
	mci_writel(host, CLKENA, reg);
	dw_mci_update_clock(slot);
	if (host->vqmmc) {
		ret = regulator_set_voltage(host->vqmmc, 1800000, 1800000);
		if (ret) {
			dev_warn(host->dev, "Switching to 1.8V signalling "
					"voltage failed\n");
			return -EIO;
		}
	} else {
		/* No regulator: set this slot's 1.8V bit in UHS_REG */
		reg = mci_readl(slot->host, UHS_REG);
		reg |= (0x1 << slot->id);
		mci_writel(slot->host, UHS_REG, reg);
	}

	/* Wait for 5ms */
	usleep_range(5000, 5500);

	/* Re-enable the card clock at the new voltage */
	dw_mci_ciu_clk_en(host, false);
	reg = mci_readl(host, CLKENA);
	reg |= SDMMC_CLKEN_ENABLE << slot->id;
	mci_writel(host, CLKENA, reg);
	dw_mci_update_clock(slot);
	if (host->pdata->use_gate_clock)
		atomic_dec_return(&host->ciu_en_win);
	if (host->pdata->use_biu_gate_clock)
		atomic_dec_return(&host->biu_en_win);

	return ret;
}
2250
2251static int dw_mci_start_signal_voltage_switch(struct mmc_host *mmc,
2252 struct mmc_ios *ios)
2253{
2254 struct dw_mci_slot *slot = mmc_priv(mmc);
2255
2256 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
2257 return dw_mci_3_3v_signal_voltage_switch(slot);
2258 else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
2259 return dw_mci_1_8v_signal_voltage_switch(slot);
2260 else
2261 return 0;
2262}
2263
2264static void dw_mci_hw_reset(struct mmc_host *host)
2265{
2266 struct dw_mci_slot *slot = mmc_priv(host);
2267 struct dw_mci_board *brd = slot->host->pdata;
2268
2269 dev_dbg(&host->class_dev, "card is going to h/w reset\n");
2270
2271 /* Use platform hw_reset function */
2272 if (brd->hw_reset)
2273 brd->hw_reset(slot->id);
2274}
2275
2276static int dw_mci_card_busy(struct mmc_host *host)
2277{
2278 struct dw_mci_slot *slot = mmc_priv(host);
2279 u32 status, ret = -1;
2280
2281 status = mci_readl(slot->host, STATUS);
2282 ret = (status & SDMMC_DATA_BUSY);
2283
2284 return ret;
2285}
2286
/* Host controller operations registered with the MMC core. */
static const struct mmc_host_ops dw_mci_ops = {
	.request		= dw_mci_request,
	.pre_req		= dw_mci_pre_req,
	.post_req		= dw_mci_post_req,
	.set_ios		= dw_mci_set_ios,
	.get_ro			= dw_mci_get_ro,
	.get_cd			= dw_mci_get_cd,
	.enable_sdio_irq	= dw_mci_enable_sdio_irq,
	.execute_tuning		= dw_mci_execute_tuning,
	.start_signal_voltage_switch = dw_mci_start_signal_voltage_switch,
	.hw_reset		= dw_mci_hw_reset,
	.card_busy		= dw_mci_card_busy,
};
2300
3c2a0909
S
/*
 * Complete @mrq and, for non-SDIO/SD-broken-CD hosts, kick off the next
 * queued request.
 *
 * Called with host->lock held; the lock is dropped around
 * mmc_request_done() so the MMC core's completion path may submit a new
 * request without deadlocking, then re-taken before returning.
 */
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq,
		enum dw_mci_state *state)
	__releases(&host->lock)
	__acquires(&host->lock)
{
	struct mmc_host *prev_mmc = host->cur_slot->mmc;

	/* Stop the per-request software timeout timer */
	del_timer(&host->timer);

	host->req_state = DW_MMC_REQ_IDLE;

	(*state) = STATE_IDLE;

	dw_mci_debug_req_log(host, mrq, STATE_REQ_END, 0);

	/* SDIO,SD case */
	if (!(host->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)) {
		host->cur_slot->mrq = NULL;
		host->mrq_cmd = NULL;
		host->mrq_dat = NULL;
		if (!list_empty(&host->queue)) {
			/* Start the next slot waiting for the controller */
			struct dw_mci_slot *slot;
			slot = list_entry(host->queue.next,
					struct dw_mci_slot, queue_node);
			list_del_init(&slot->queue_node);
			host->state_cmd = STATE_SENDING_CMD;
			dw_mci_start_request(host, slot);
		}
	}

	/* Drop the lock while notifying the core of completion */
	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}
2335
/*
 * Finish a command phase: capture the card's response, translate the
 * interrupt status into cmd->error, and on error apply the vendor
 * retry quirk and clean up any associated data transfer.
 */
static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	struct dw_mci_slot *slot = host->cur_slot;
	struct mmc_host *mmc = slot->mmc;
	struct mmc_card *card = slot->mmc->card;

	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* Long response: RESP0..3 hold bits 127..0 */
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	/* Map controller error bits to errno values */
	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	if (cmd->error) {
		/* NOTE(review): printk below lacks a KERN_ log level */
		if (card && mmc_card_mmc(card)) {
			printk("%s: CMD%d : %d, status = %#x, CLKSEL = 0x%08x, EN_SHIFT = 0x%08x\n",
				mmc_hostname(host->cur_slot->mmc),
				cmd->opcode, cmd->error, status,
				mci_readl(host, CLKSEL),
				mci_readl(host, DDR200_ENABLE_SHIFT));
		}
		/*
		 * Vendor quirk: retry response-timeouts of CMD23 and the
		 * HS200 tuning command up to MAX_RETRY_CNT times before
		 * giving up.
		 */
		if ((host->quirks & DW_MMC_QUIRK_RETRY_ERROR)) {
			if ((status & SDMMC_INT_RTO) &&
				((cmd->opcode == MMC_SET_BLOCK_COUNT) ||
				(cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200))) {
				dev_err(host->dev,
					"CMD error CMD%02d CLKSEL 0x %08x error %d "
					"tuning_progress %d status: 0x %08x Count %d\n",
					cmd->opcode,
					mci_readl(host, CLKSEL),
					cmd->error,
					mmc->tuning_progress,
					status,
					host->pdata->error_retry_cnt);
				if (host->pdata->error_retry_cnt < MAX_RETRY_CNT) {
					cmd->error = -ETIMEDOUT;
					if (cmd->data)
						cmd->data->error = 0;
					cmd->retries = DATA_RETRY;
					host->pdata->error_retry_cnt++;
				} else {
					cmd->retries = 0;
					host->pdata->error_retry_cnt = 0;
				}
			}
		}
		/* newer ip versions need a delay between retries */
		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
			mdelay(20);

		if (cmd->data) {
			/* Abandon the data phase of the failed command */
			dw_mci_stop_dma(host);
			host->data = NULL;
			host->sg = NULL;
		}
	}
}
2414
3c2a0909
S
/*
 * Software data-timeout watchdog (DW_MMC_QUIRK_SW_DATA_TIMEOUT only).
 *
 * Fires every DRTO_MON_PERIOD ms while a read transfer is outstanding.
 * If the transfer already completed, or data has started landing in
 * the FIFO, monitoring stops.  Otherwise the timer re-arms itself
 * until DRTO ms have elapsed in total, then injects a data-timeout
 * (DTO) status and schedules the tasklet to handle it.
 */
static void dw_mci_dto_timer(unsigned long data)
{
	struct dw_mci *host = (struct dw_mci *)data;
	u32 fifo_cnt = 0, done = false;

	if (!(host->quirks & DW_MMC_QUIRK_SW_DATA_TIMEOUT))
		return;

	/* Check whether the data transfer is already done */
	if (host->pending_events & EVENT_DATA_COMPLETE ||
		host->completed_events & EVENT_DATA_COMPLETE)
		done = true;

	/* Check whether the data transfer has started (FIFO fill count) */
	fifo_cnt = mci_readl(host, STATUS);
	fifo_cnt = (fifo_cnt >> 17) & 0x1FFF;
	if (fifo_cnt > 0)
		done = true;

	if (done == true) {
		dev_info(host->dev,
			"Done, S/W timer for data timeout %d ms fifo count %d\n",
			host->dto_cnt, fifo_cnt);
		return;
	}

	if (host->dto_cnt < (DRTO / DRTO_MON_PERIOD)) {
		/* monitoring */
		host->dto_cnt++;
		mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(DRTO_MON_PERIOD));
	} else {
		/* data timeout */
		host->data_status |= SDMMC_INT_DTO;
		set_bit(EVENT_DATA_ERROR, &host->pending_events);
		tasklet_schedule(&host->tasklet);
	}
}
2452
2453static int dw_mci_tasklet_cmd(struct dw_mci *host)
f95f3850 2454{
f95f3850
WN
2455 struct mmc_data *data;
2456 struct mmc_command *cmd;
2457 enum dw_mci_state state;
2458 enum dw_mci_state prev_state;
3c2a0909 2459 u32 done = 0;
f95f3850 2460
3c2a0909 2461 state = host->state_cmd;
f95f3850
WN
2462 data = host->data;
2463
2464 do {
2465 prev_state = state;
2466
2467 switch (state) {
2468 case STATE_IDLE:
2469 break;
2470
2471 case STATE_SENDING_CMD:
2472 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
2473 &host->pending_events))
2474 break;
2475
2476 cmd = host->cmd;
f95f3850 2477 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
053b3ce6 2478 dw_mci_command_complete(host, cmd);
3c2a0909 2479 if (cmd && cmd == host->mrq_cmd->sbc && !cmd->error) {
053b3ce6
SJ
2480 prev_state = state = STATE_SENDING_CMD;
2481 __dw_mci_start_request(host, host->cur_slot,
3c2a0909
S
2482 host->mrq_cmd->cmd);
2483 goto exit_cmd;
2484 }
2485
2486 if (cmd->data && cmd->error &&
2487 cmd != host->mrq_cmd->data->stop) {
2488 /* To avoid fifo full condition */
2489 dw_mci_fifo_reset(host->dev, host);
2490 dw_mci_ciu_reset(host->dev, host);
2491
2492 if (MMC_CHECK_CMDQ_MODE(host)) {
2493 list_add_tail(&host->mrq_cmd->hlist,
2494 &host->cur_slot->mrq_list);
2495 del_timer(&host->timer);
2496 dw_mci_stop_dma(host);
2497 sg_miter_stop(&host->sg_miter);
2498 host->sg = NULL;
2499 dw_mci_fifo_reset(host->dev, host);
2500 state = STATE_IDLE;
2501 } else {
2502 if (host->mrq_cmd->data->stop)
2503 send_stop_cmd(host,
2504 host->mrq_cmd->data);
2505 else {
2506 dw_mci_start_command(host,
2507 &host->stop,
2508 host->stop_cmdr);
2509 host->stop_snd = true;
2510 }
2511 state = STATE_SENDING_STOP;
2512 dw_mci_debug_req_log(host,
2513 host->mrq_cmd,
2514 STATE_REQ_CMD_PROCESS, state);
2515 }
2516 break;
2517 }
2518
2519 if (!host->mrq_cmd->data || cmd->error) {
2520 done = 1;
2521 goto exit_cmd;
053b3ce6
SJ
2522 }
2523
3c2a0909
S
2524 if (cmd->data && cmd->error &&
2525 cmd == host->mrq_cmd->data->stop) {
2526 done = 1;
2527 goto exit_cmd;
f95f3850
WN
2528 }
2529
2530 prev_state = state = STATE_SENDING_DATA;
3c2a0909
S
2531 dw_mci_debug_req_log(host, host->mrq_cmd,
2532 STATE_REQ_CMD_PROCESS, state);
2533 if (host->quirks & DW_MMC_QUIRK_SW_DATA_TIMEOUT) {
2534 if (cmd->data &&
2535 (cmd->data->flags & MMC_DATA_READ)) {
2536 host->dto_cnt = 0;
2537 mod_timer(&host->dto_timer,
2538 jiffies + msecs_to_jiffies(DRTO_MON_PERIOD));
2539 }
2540 }
2541
2542 break;
2543
2544 case STATE_SENDING_STOP:
2545 if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
2546 &host->pending_events))
2547 break;
2548
2549 if (host->mrq_cmd->cmd->error &&
2550 host->mrq_cmd->data) {
2551 dw_mci_stop_dma(host);
2552 sg_miter_stop(&host->sg_miter);
2553 host->sg = NULL;
2554 dw_mci_fifo_reset(host->dev, host);
2555 }
2556
2557 host->cmd = NULL;
2558 host->data = NULL;
2559
2560 if (host->mrq_cmd->stop)
2561 dw_mci_command_complete(host,
2562 host->mrq_cmd->stop);
2563 else
2564 host->cmd_status = 0;
2565
2566 done = 1;
2567 goto exit_cmd;
2568
2569 default:
2570 break;
2571 }
2572 } while (state != prev_state);
2573
2574 host->state_cmd = state;
2575exit_cmd:
2576
2577 return done;
2578}
2579
/*
 * Data half of the request state machine, run from the tasklet with
 * host->lock held.
 *
 * Advances host->state_dat through SENDING_DATA -> DATA_BUSY ->
 * SENDING_STOP (or DATA_ERROR) based on the XFER/DATA/CMD events posted
 * by the interrupt handler.  Returns 1 when the data request is fully
 * finished, 0 while more events are awaited.
 */
static int dw_mci_tasklet_dat(struct dw_mci *host)
{
	struct mmc_data *data;
	enum dw_mci_state state;
	enum dw_mci_state prev_state;
	u32 status, done = 0;

	state = host->state_dat;
	data = host->data;

	do {
		prev_state = state;

		switch (state) {
		case STATE_IDLE:
			break;

		case STATE_SENDING_DATA:
			/* Error during transfer: abort and send a stop */
			if (test_and_clear_bit(EVENT_DATA_ERROR,
					&host->pending_events)) {
				set_bit(EVENT_XFER_COMPLETE,
					&host->pending_events);

				/* To avoid fifo full condition */
				dw_mci_fifo_reset(host->dev, host);

				if (MMC_CHECK_CMDQ_MODE(host)) {
					/* CMDQ: requeue the request and go idle */
					list_add_tail(&host->mrq_dat->hlist,
						&host->cur_slot->mrq_list);
					del_timer(&host->timer);
					dw_mci_stop_dma(host);
					sg_miter_stop(&host->sg_miter);
					host->sg = NULL;
					dw_mci_fifo_reset(host->dev, host);
					state = STATE_IDLE;
				} else {
					if (data->stop)
						send_stop_cmd(host, data);
					else {
						dw_mci_start_command(host,
							&host->stop,
							host->stop_cmdr);
						host->stop_snd = true;
					}
					state = STATE_DATA_ERROR;
					dw_mci_debug_req_log(host,
						host->mrq_dat,
						STATE_REQ_DATA_PROCESS, state);
				}
				break;
			}

			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
			prev_state = state = STATE_DATA_BUSY;
			dw_mci_debug_req_log(host, host->mrq_dat,
					STATE_REQ_DATA_PROCESS, state);
			/* fall through */

		case STATE_DATA_BUSY:
			if (test_and_clear_bit(EVENT_DATA_ERROR,
						&host->pending_events))
				dw_mci_fifo_reset(host->dev, host);
			if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
						&host->pending_events))
				break;

			set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
			status = host->data_status;

			/* Translate controller status into data->error */
			if (status & DW_MCI_DATA_ERROR_FLAGS) {
				if (status & SDMMC_INT_DTO) {
					dev_err(host->dev,
						"data timeout error\n");
					data->error = -ETIMEDOUT;
					host->mrq_dat->cmd->error = -ETIMEDOUT;
				} else if (status & SDMMC_INT_DCRC) {
					dev_err(host->dev,
						"data CRC error %s\n",
						(data->flags & MMC_DATA_READ) ? "READ" : "WRITE");
					data->error = -EILSEQ;
				} else if (status & SDMMC_INT_EBE) {
					if (host->dir_status ==
							DW_MCI_SEND_STATUS) {
						/*
						 * No data CRC status was returned.
						 * The number of bytes transferred will
						 * be exaggerated in PIO mode.
						 */
						data->bytes_xfered = 0;
						data->error = -ETIMEDOUT;
						dev_err(host->dev,
							"Write no CRC\n");
						dw_mci_reg_dump(host);
					} else {
						data->error = -EIO;
						dev_err(host->dev,
							"End bit error\n");
					}

				} else if (status & SDMMC_INT_SBE) {
					dev_err(host->dev,
						"Start bit error "
						"(status=%08x)\n",
						status);
					data->error = -EIO;
				} else {
					dev_err(host->dev,
						"data FIFO error "
						"(status=%08x)\n",
						status);
					data->error = -EIO;
				}
				/*
				 * After an error, there may be data lingering
				 * in the FIFO, so reset it - doing so
				 * generates a block interrupt, hence setting
				 * the scatter-gather pointer to NULL.
				 */
				sg_miter_stop(&host->sg_miter);
				host->sg = NULL;
				dw_mci_fifo_reset(host->dev, host);
				dw_mci_ciu_reset(host->dev, host);

			} else {
				/* Clean completion: account the full transfer */
				data->bytes_xfered = data->blocks * data->blksz;
				data->error = 0;
				host->pdata->error_retry_cnt = 0;
			}

			/* Read completed (or failed): stop the S/W DTO monitor */
			if (host->quirks & DW_MMC_QUIRK_SW_DATA_TIMEOUT &&
				(data->flags & MMC_DATA_READ))
				del_timer(&host->dto_timer);

			host->data = NULL;

			if (!data->stop && !host->stop_snd) {
				done = 1;
				goto exit_dat;
			}

			/* Pre-defined (CMD23) transfers need no stop command */
			if (host->mrq_dat->sbc && !data->error) {
				if (data->stop)
					data->stop->error = 0;
				done = 1;
				goto exit_dat;
			}

			if (MMC_CHECK_CMDQ_MODE(host) && !data->error) {
				done = 1;
				goto exit_dat;
			}

			if (MMC_CHECK_CMDQ_MODE(host)) {
				/* CMDQ error path: requeue and go idle */
				list_add_tail(&host->mrq_dat->hlist,
					&host->cur_slot->mrq_list);
				del_timer(&host->timer);
				dw_mci_stop_dma(host);
				sg_miter_stop(&host->sg_miter);
				host->sg = NULL;
				dw_mci_fifo_reset(host->dev, host);
				state = STATE_IDLE;
				break;
			}

			prev_state = state = STATE_SENDING_STOP;
			dw_mci_debug_req_log(host, host->mrq_dat,
					STATE_REQ_DATA_PROCESS, state);
			if (!data->error) {
				if (data->stop) {
					/* NOTE(review): BUG_ON below is dead code
					 * (guarded by the same condition) */
					BUG_ON(!data->stop);
					send_stop_cmd(host, data);
				} else {
					dw_mci_start_command(host,
							&host->stop,
							host->stop_cmdr);
					host->stop_snd = true;
				}
			}
			if (test_and_clear_bit(EVENT_DATA_ERROR,
						&host->pending_events)) {
				if (MMC_CHECK_CMDQ_MODE(host)) {
					list_add_tail(&host->mrq_dat->hlist,
						&host->cur_slot->mrq_list);
					del_timer(&host->timer);
					dw_mci_stop_dma(host);
					sg_miter_stop(&host->sg_miter);
					host->sg = NULL;
					dw_mci_fifo_reset(host->dev, host);
					state = STATE_IDLE;
					break;
				} else {
					if (data->stop)
						send_stop_cmd(host, data);
					else {
						dw_mci_start_command(host,
							&host->stop,
							host->stop_cmdr);
						host->stop_snd = true;
					}
				}
			}
			/* fall through */

		case STATE_SENDING_STOP:
			if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
						&host->pending_events))
				break;

			/* Failed data command: tear down the transfer state */
			if (host->mrq_dat->cmd->error &&
					host->mrq_dat->data) {
				dw_mci_stop_dma(host);
				sg_miter_stop(&host->sg_miter);
				host->sg = NULL;
				dw_mci_fifo_reset(host->dev, host);
			}

			host->cmd = NULL;
			host->data = NULL;

			if (host->mrq_dat->stop)
				dw_mci_command_complete(host,
						host->mrq_dat->stop);
			else
				host->cmd_status = 0;

			done = 1;
			goto exit_dat;

		case STATE_DATA_ERROR:
			if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
						&host->pending_events))
				break;

			/* Transfer aborted: fake completion so DATA_BUSY can finish */
			dw_mci_stop_dma(host);
			dw_mci_fifo_reset(host->dev, host);
			set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
			set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
			set_bit(EVENT_CMD_COMPLETE, &host->pending_events);

			state = STATE_DATA_BUSY;
			dw_mci_debug_req_log(host, host->mrq_dat,
					STATE_REQ_DATA_PROCESS, state);
			break;

		default:
			break;
		}
	} while (state != prev_state);

	host->state_dat = state;
exit_dat:

	return done;
}
2838
/*
 * Main request-processing tasklet.
 *
 * Runs the command state machine, migrates a request that reached its
 * data phase over to the data state machine, runs that, then either
 * completes finished requests (SDIO/SD path) or pulls the next request
 * off the slot/host queues (broken-card-detect, i.e. eMMC, path).
 * All state manipulation happens under host->lock.
 */
static void dw_mci_tasklet_func(unsigned long priv)
{
	struct dw_mci *host = (struct dw_mci *)priv;
	int done_cmd, done_dat;
	struct mmc_request *mrq_cmd, *mrq_dat;

	spin_lock(&host->lock);
	host->tasklet_state = 1;

	/* Hardware locked write error: dump registers and bail out */
	if (host->cmd_status & SDMMC_INT_HLE) {
		dw_mci_reg_dump(host);
		clear_bit(EVENT_CMD_COMPLETE, &host->pending_events);
		dev_err(host->dev, "hardware locked write error\n");
		goto unlock;
	}

	/* command state */
	done_cmd = dw_mci_tasklet_cmd(host);
	mrq_cmd = host->mrq_cmd;

	if (done_cmd) {
		host->state_cmd = STATE_IDLE;
		dw_mci_debug_req_log(host, mrq_cmd, STATE_REQ_CMD_PROCESS,
				host->state_cmd);
	}

	/* Command phase reached its data phase: hand over to the data side */
	if (host->state_cmd == STATE_SENDING_DATA ||
	    host->state_cmd == STATE_DATA_BUSY ||
	    host->state_cmd == STATE_DATA_ERROR ||
	    host->state_cmd == STATE_SENDING_STOP) {
		host->state_dat = host->state_cmd;
		host->mrq_dat = host->mrq_cmd;
		host->state_cmd = STATE_IDLE;
		host->mrq_cmd = NULL;
		dw_mci_debug_req_log(host, host->mrq_dat,
				STATE_REQ_DATA_PROCESS, host->state_dat);
	}

	/* data state */
	done_dat = dw_mci_tasklet_dat(host);
	mrq_dat = host->mrq_dat;

	if (done_dat)
		host->state_dat = STATE_IDLE;

	/* SDIO,SD case */
	if (!(host->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)) {
		if (done_cmd || done_dat)
			goto req_end;
		else
			goto unlock;
	}

	/* eMMC path: dispatch the next queued request, if any */
	if (host->state_cmd == STATE_IDLE) {
		if (!list_empty(&host->cur_slot->mrq_list)) {
			host->cur_slot->mrq = list_first_entry(
					&host->cur_slot->mrq_list,
					struct mmc_request, hlist);
			list_del_init(&host->cur_slot->mrq->hlist);
			host->state_cmd = STATE_SENDING_CMD;
			dw_mci_start_request(host, host->cur_slot);
		} else {
			host->cur_slot->mrq = NULL;
			if (!list_empty(&host->queue)) {
				struct dw_mci_slot *slot;
				slot = list_entry(host->queue.next,
					struct dw_mci_slot, queue_node);
				list_del_init(&slot->queue_node);
				if (!list_empty(&slot->mrq_list)) {
					slot->mrq = list_first_entry(
						&slot->mrq_list,
						struct mmc_request, hlist);
					list_del_init(&slot->mrq->hlist);
					host->state_cmd = STATE_SENDING_CMD;
					dw_mci_start_request(host, slot);
				} else
					slot->mrq = NULL;
			}
		}
	}
	host->tasklet_state = 0;

req_end:
	if (done_cmd)
		dw_mci_request_end(host, mrq_cmd, &host->state_cmd);

	if (done_dat)
		dw_mci_request_end(host, mrq_dat, &host->state_dat);
unlock:

	host->tasklet_state = 0;
	spin_unlock(&host->lock);

	/* Kick any request that was queued while the tasklet was busy */
	if (test_and_clear_bit(EVENT_QUEUE_READY, &host->pending_events))
		mmc_handle_queued_request(host->cur_slot->mmc);
}
2935
34b664a2
JH
2936/* push final bytes to part_buf, only use during push */
2937static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
f95f3850 2938{
34b664a2
JH
2939 memcpy((void *)&host->part_buf, buf, cnt);
2940 host->part_buf_count = cnt;
2941}
f95f3850 2942
34b664a2
JH
2943/* append bytes to part_buf, only use during push */
2944static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2945{
2946 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2947 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2948 host->part_buf_count += cnt;
2949 return cnt;
2950}
f95f3850 2951
34b664a2
JH
2952/* pull first bytes from part_buf, only use during pull */
2953static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2954{
2955 cnt = min(cnt, (int)host->part_buf_count);
2956 if (cnt) {
2957 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2958 cnt);
2959 host->part_buf_count -= cnt;
2960 host->part_buf_start += cnt;
f95f3850 2961 }
34b664a2 2962 return cnt;
f95f3850
WN
2963}
2964
34b664a2
JH
2965/* pull final bytes from the part_buf, assuming it's just been filled */
2966static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
f95f3850 2967{
34b664a2
JH
2968 memcpy(buf, &host->part_buf, cnt);
2969 host->part_buf_start = cnt;
2970 host->part_buf_count = (1 << host->data_shift) - cnt;
2971}
f95f3850 2972
34b664a2
JH
/*
 * PIO push for a 16-bit FIFO: drain @buf into the data FIFO two bytes
 * at a time, using part_buf for leftovers and an aligned bounce buffer
 * when the platform cannot do unaligned 16-bit loads.
 */
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 2) {
			/* part_buf now holds a full FIFO word: flush it */
			mci_writew(host, DATA(host->data_offset),
					host->part_buf16);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writew(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			mci_writew(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writew(host, DATA(host->data_offset),
					host->part_buf16);
	}
}
f95f3850 3023
34b664a2
JH
/*
 * PIO pull for a 16-bit FIFO: read @cnt bytes from the data FIFO into
 * @buf, bouncing through an aligned buffer when the platform cannot do
 * unaligned 16-bit stores; a trailing odd byte is parked in part_buf.
 */
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x1)) {
		while (cnt >= 2) {
			/* pull data from fifo into aligned buffer */
			u16 aligned_buf[64];
			int len = min(cnt & -2, (int)sizeof(aligned_buf));
			int items = len >> 1;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readw(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u16 *pdata = buf;
		for (; cnt >= 2; cnt -= 2)
			*pdata++ = mci_readw(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* Read one more FIFO word; leftover bytes stay in part_buf */
		host->part_buf16 = mci_readw(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
3055
/*
 * PIO push for a 32-bit FIFO: drain @buf into the data FIFO four bytes
 * at a time, using part_buf for leftovers and an aligned bounce buffer
 * when the platform cannot do unaligned 32-bit loads.
 */
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;
		if (host->part_buf_count == 4) {
			/* part_buf now holds a full FIFO word: flush it */
			mci_writel(host, DATA(host->data_offset),
					host->part_buf32);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writel(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			mci_writel(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writel(host, DATA(host->data_offset),
					host->part_buf32);
	}
}
3106
/*
 * PIO pull for a 32-bit FIFO: read @cnt bytes from the data FIFO into
 * @buf, bouncing through an aligned buffer when the platform cannot do
 * unaligned 32-bit stores; trailing bytes are parked in part_buf.
 */
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x3)) {
		while (cnt >= 4) {
			/* pull data from fifo into aligned buffer */
			u32 aligned_buf[32];
			int len = min(cnt & -4, (int)sizeof(aligned_buf));
			int items = len >> 2;
			int i;
			for (i = 0; i < items; ++i)
				aligned_buf[i] = mci_readl(host,
						DATA(host->data_offset));
			/* memcpy from aligned buffer into output buffer */
			memcpy(buf, aligned_buf, len);
			buf += len;
			cnt -= len;
		}
	} else
#endif
	{
		u32 *pdata = buf;
		for (; cnt >= 4; cnt -= 4)
			*pdata++ = mci_readl(host, DATA(host->data_offset));
		buf = pdata;
	}
	if (cnt) {
		/* Read one more FIFO word; leftover bytes stay in part_buf */
		host->part_buf32 = mci_readl(host, DATA(host->data_offset));
		dw_mci_pull_final_bytes(host, buf, cnt);
	}
}
3138
/*
 * PIO push for a 64-bit FIFO: drain @buf into the data FIFO eight
 * bytes at a time, using part_buf for leftovers and an aligned bounce
 * buffer when the platform cannot do unaligned 64-bit loads.
 */
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
	struct mmc_data *data = host->data;
	int init_cnt = cnt;

	/* try and push anything in the part_buf */
	if (unlikely(host->part_buf_count)) {
		int len = dw_mci_push_part_bytes(host, buf, cnt);
		buf += len;
		cnt -= len;

		if (host->part_buf_count == 8) {
			/* part_buf now holds a full FIFO word: flush it */
			mci_writeq(host, DATA(host->data_offset),
					host->part_buf);
			host->part_buf_count = 0;
		}
	}
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	if (unlikely((unsigned long)buf & 0x7)) {
		while (cnt >= 8) {
			u64 aligned_buf[16];
			int len = min(cnt & -8, (int)sizeof(aligned_buf));
			int items = len >> 3;
			int i;
			/* memcpy from input buffer into aligned buffer */
			memcpy(aligned_buf, buf, len);
			buf += len;
			cnt -= len;
			/* push data from aligned buffer into fifo */
			for (i = 0; i < items; ++i)
				mci_writeq(host, DATA(host->data_offset),
						aligned_buf[i]);
		}
	} else
#endif
	{
		u64 *pdata = buf;
		for (; cnt >= 8; cnt -= 8)
			mci_writeq(host, DATA(host->data_offset), *pdata++);
		buf = pdata;
	}
	/* put anything remaining in the part_buf */
	if (cnt) {
		dw_mci_set_part_bytes(host, buf, cnt);
		/* Push data if we have reached the expected data length */
		if ((data->bytes_xfered + init_cnt) ==
		    (data->blksz * data->blocks))
			mci_writeq(host, DATA(host->data_offset),
					host->part_buf);
	}
}
3190
3191static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
3192{
34b664a2
JH
3193#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
3194 if (unlikely((unsigned long)buf & 0x7)) {
3195 while (cnt >= 8) {
3196 /* pull data from fifo into aligned buffer */
3197 u64 aligned_buf[16];
3198 int len = min(cnt & -8, (int)sizeof(aligned_buf));
3199 int items = len >> 3;
3200 int i;
3201 for (i = 0; i < items; ++i)
4e0a5adf
JC
3202 aligned_buf[i] = mci_readq(host,
3203 DATA(host->data_offset));
34b664a2
JH
3204 /* memcpy from aligned buffer into output buffer */
3205 memcpy(buf, aligned_buf, len);
3206 buf += len;
3207 cnt -= len;
3208 }
3209 } else
3210#endif
3211 {
3212 u64 *pdata = buf;
3213 for (; cnt >= 8; cnt -= 8)
4e0a5adf 3214 *pdata++ = mci_readq(host, DATA(host->data_offset));
34b664a2
JH
3215 buf = pdata;
3216 }
3217 if (cnt) {
4e0a5adf 3218 host->part_buf = mci_readq(host, DATA(host->data_offset));
34b664a2
JH
3219 dw_mci_pull_final_bytes(host, buf, cnt);
3220 }
3221}
f95f3850 3222
34b664a2
JH
3223static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
3224{
3225 int len;
f95f3850 3226
34b664a2
JH
3227 /* get remaining partial bytes */
3228 len = dw_mci_pull_part_bytes(host, buf, cnt);
3229 if (unlikely(len == cnt))
3230 return;
3231 buf += len;
3232 cnt -= len;
3233
3234 /* get the rest of the data */
3235 host->pull_data(host, buf, cnt);
f95f3850
WN
3236}
3237
87a74d39 3238static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
f95f3850 3239{
f9c2a0dc
SJ
3240 struct sg_mapping_iter *sg_miter = &host->sg_miter;
3241 void *buf;
3242 unsigned int offset;
f95f3850
WN
3243 struct mmc_data *data = host->data;
3244 int shift = host->data_shift;
3245 u32 status;
3e4b0d8b 3246 unsigned int len;
f9c2a0dc 3247 unsigned int remain, fcnt;
3c2a0909 3248 u32 temp;
f95f3850
WN
3249
3250 do {
f9c2a0dc
SJ
3251 if (!sg_miter_next(sg_miter))
3252 goto done;
3253
4225fc85 3254 host->sg = sg_miter->piter.sg;
f9c2a0dc
SJ
3255 buf = sg_miter->addr;
3256 remain = sg_miter->length;
3257 offset = 0;
3258
3259 do {
3260 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
3261 << shift) + host->part_buf_count;
3262 len = min(remain, fcnt);
3263 if (!len)
3264 break;
34b664a2 3265 dw_mci_pull_data(host, (void *)(buf + offset), len);
3e4b0d8b 3266 data->bytes_xfered += len;
f95f3850 3267 offset += len;
f9c2a0dc
SJ
3268 remain -= len;
3269 } while (remain);
f95f3850 3270
e74f3a9c 3271 sg_miter->consumed = offset;
f95f3850
WN
3272 status = mci_readl(host, MINTSTS);
3273 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
87a74d39
KK
3274 /* if the RXDR is ready read again */
3275 } while ((status & SDMMC_INT_RXDR) ||
3276 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
f9c2a0dc
SJ
3277
3278 if (!remain) {
3279 if (!sg_miter_next(sg_miter))
3280 goto done;
3281 sg_miter->consumed = 0;
3282 }
3283 sg_miter_stop(sg_miter);
f95f3850
WN
3284 return;
3285
3286done:
3c2a0909
S
3287
3288 /* Disable RX/TX IRQs */
3289 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
3290 temp = mci_readl(host, INTMASK);
3291 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
3292 mci_writel(host, INTMASK, temp);
3293
f9c2a0dc
SJ
3294 sg_miter_stop(sg_miter);
3295 host->sg = NULL;
f95f3850
WN
3296 smp_wmb();
3297 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
3298}
3299
3300static void dw_mci_write_data_pio(struct dw_mci *host)
3301{
f9c2a0dc
SJ
3302 struct sg_mapping_iter *sg_miter = &host->sg_miter;
3303 void *buf;
3304 unsigned int offset;
f95f3850
WN
3305 struct mmc_data *data = host->data;
3306 int shift = host->data_shift;
3307 u32 status;
3e4b0d8b 3308 unsigned int len;
f9c2a0dc
SJ
3309 unsigned int fifo_depth = host->fifo_depth;
3310 unsigned int remain, fcnt;
3c2a0909 3311 u32 temp;
f95f3850
WN
3312
3313 do {
f9c2a0dc
SJ
3314 if (!sg_miter_next(sg_miter))
3315 goto done;
3316
4225fc85 3317 host->sg = sg_miter->piter.sg;
f9c2a0dc
SJ
3318 buf = sg_miter->addr;
3319 remain = sg_miter->length;
3320 offset = 0;
3321
3322 do {
3323 fcnt = ((fifo_depth -
3324 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
3325 << shift) - host->part_buf_count;
3326 len = min(remain, fcnt);
3327 if (!len)
3328 break;
f95f3850 3329 host->push_data(host, (void *)(buf + offset), len);
3e4b0d8b 3330 data->bytes_xfered += len;
f95f3850 3331 offset += len;
f9c2a0dc
SJ
3332 remain -= len;
3333 } while (remain);
f95f3850 3334
e74f3a9c 3335 sg_miter->consumed = offset;
f95f3850
WN
3336 status = mci_readl(host, MINTSTS);
3337 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
f95f3850 3338 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
f9c2a0dc
SJ
3339
3340 if (!remain) {
3341 if (!sg_miter_next(sg_miter))
3342 goto done;
3343 sg_miter->consumed = 0;
3344 }
3345 sg_miter_stop(sg_miter);
f95f3850
WN
3346 return;
3347
3348done:
3c2a0909
S
3349
3350 /* Disable RX/TX IRQs */
3351 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
3352 temp = mci_readl(host, INTMASK);
3353 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
3354 mci_writel(host, INTMASK, temp);
3355
f9c2a0dc
SJ
3356 sg_miter_stop(sg_miter);
3357 host->sg = NULL;
f95f3850
WN
3358 smp_wmb();
3359 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
3360}
3361
3362static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
3363{
3364 if (!host->cmd_status)
3365 host->cmd_status = status;
3366
3367 smp_wmb();
3368
3369 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
3370 tasklet_schedule(&host->tasklet);
3371}
3372
3373static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
3374{
3375 struct dw_mci *host = dev_id;
3c2a0909 3376 u32 status, pending, reg;
1a5c8e1f 3377 int i;
3c2a0909 3378 int ret = IRQ_NONE;
f95f3850 3379
3c2a0909 3380 status = mci_readl(host, RINTSTS);
1fb5f68a
MC
3381 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
3382
3383 if (pending) {
f95f3850
WN
3384
3385 /*
3386 * DTO fix - version 2.10a and below, and only if internal DMA
3387 * is configured.
3388 */
3389 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
3390 if (!pending &&
3391 ((mci_readl(host, STATUS) >> 17) & 0x1fff))
3392 pending |= SDMMC_INT_DATA_OVER;
3393 }
3394
3c2a0909
S
3395 if (host->quirks & DW_MCI_QUIRK_NO_DETECT_EBIT &&
3396 host->dir_status == DW_MCI_RECV_STATUS) {
3397 if (status & SDMMC_INT_EBE)
3398 mci_writel(host, RINTSTS, SDMMC_INT_EBE);
3399 }
3400
3401 if (pending & SDMMC_INT_HLE) {
3402 mci_writel(host, RINTSTS, SDMMC_INT_HLE);
3403 dw_mci_debug_cmd_log(host->cmd, host, false,
3404 DW_MCI_FLAG_ERROR, status);
3405 host->cmd_status = pending;
3406 tasklet_schedule(&host->tasklet);
3407 ret = IRQ_HANDLED;
3408 }
3409
f95f3850
WN
3410 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
3411 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
3c2a0909
S
3412 dw_mci_debug_cmd_log(host->cmd, host, false,
3413 DW_MCI_FLAG_ERROR, status);
182c9081 3414 host->cmd_status = pending;
3c2a0909
S
3415 ret = IRQ_HANDLED;
3416 }
3417
3418 if (pending & SDMMC_INT_VOLT_SW) {
3419 u32 cmd = mci_readl(host, CMD);
3420 u32 cmd_up_clk = cmd;
3421 cmd = cmd & 0x3f;
3422 if ((cmd == SD_SWITCH_VOLTAGE) ||
3423 (cmd_up_clk & SDMMC_CMD_UPD_CLK)) {
3424 mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SW);
3425 pending &= ~(SDMMC_INT_VOLT_SW);
3426 dw_mci_cmd_interrupt(host, pending);
3427 ret = IRQ_HANDLED;
3428 }
f95f3850
WN
3429 }
3430
3431 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
3c2a0909
S
3432 if (mci_readl(host, RINTSTS) & SDMMC_INT_HTO) {
3433 dev_err(host->dev, "host timeout error\n");
3434 dw_mci_reg_dump(host);
3435 reg = __raw_readl(host->regs + SDMMC_MPSTAT);
3436 if (reg & (BIT(0)))
3437 panic("DMA hang Issue !!!!");
3438 }
f95f3850
WN
3439 /* if there is an error report DATA_ERROR */
3440 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
3c2a0909
S
3441 dw_mci_debug_cmd_log(host->cmd, host, false,
3442 DW_MCI_FLAG_ERROR, status);
182c9081 3443 host->data_status = pending;
f95f3850
WN
3444 smp_wmb();
3445 set_bit(EVENT_DATA_ERROR, &host->pending_events);
3c2a0909
S
3446 if (pending & SDMMC_INT_SBE)
3447 set_bit(EVENT_DATA_COMPLETE,
3448 &host->pending_events);
9b2026a1 3449 tasklet_schedule(&host->tasklet);
3c2a0909 3450 ret = IRQ_HANDLED;
f95f3850
WN
3451 }
3452
3453 if (pending & SDMMC_INT_DATA_OVER) {
3454 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
3c2a0909
S
3455 dw_mci_debug_cmd_log(host->cmd, host, false,
3456 DW_MCI_FLAG_DTO, 0);
f95f3850 3457 if (!host->data_status)
182c9081 3458 host->data_status = pending;
f95f3850
WN
3459 smp_wmb();
3460 if (host->dir_status == DW_MCI_RECV_STATUS) {
3461 if (host->sg != NULL)
87a74d39 3462 dw_mci_read_data_pio(host, true);
f95f3850
WN
3463 }
3464 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3465 tasklet_schedule(&host->tasklet);
3c2a0909 3466 ret = IRQ_HANDLED;
f95f3850
WN
3467 }
3468
3469 if (pending & SDMMC_INT_RXDR) {
3470 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
b40af3aa 3471 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
87a74d39 3472 dw_mci_read_data_pio(host, false);
3c2a0909 3473 ret = IRQ_HANDLED;
f95f3850
WN
3474 }
3475
3476 if (pending & SDMMC_INT_TXDR) {
3477 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
b40af3aa 3478 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
f95f3850 3479 dw_mci_write_data_pio(host);
3c2a0909 3480 ret = IRQ_HANDLED;
f95f3850
WN
3481 }
3482
3483 if (pending & SDMMC_INT_CMD_DONE) {
3484 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
3c2a0909
S
3485 dw_mci_debug_cmd_log(host->cmd, host, false,
3486 DW_MCI_FLAG_CD, 0);
182c9081 3487 dw_mci_cmd_interrupt(host, pending);
3c2a0909 3488 ret = IRQ_HANDLED;
f95f3850
WN
3489 }
3490
3491 if (pending & SDMMC_INT_CD) {
3492 mci_writel(host, RINTSTS, SDMMC_INT_CD);
95dcc2cb 3493 queue_work(host->card_workqueue, &host->card_work);
3c2a0909 3494 ret = IRQ_HANDLED;
f95f3850
WN
3495 }
3496
1a5c8e1f
SH
3497 /* Handle SDIO Interrupts */
3498 for (i = 0; i < host->num_slots; i++) {
3499 struct dw_mci_slot *slot = host->slot[i];
3500 if (pending & SDMMC_INT_SDIO(i)) {
3501 mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
3502 mmc_signal_sdio_irq(slot->mmc);
3c2a0909 3503 ret = IRQ_HANDLED;
1a5c8e1f
SH
3504 }
3505 }
3506
1fb5f68a 3507 }
f95f3850
WN
3508
3509#ifdef CONFIG_MMC_DW_IDMAC
3510 /* Handle DMA interrupts */
3511 pending = mci_readl(host, IDSTS);
3512 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
3513 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
3514 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
f95f3850 3515 host->dma_ops->complete(host);
3c2a0909 3516 ret = IRQ_HANDLED;
f95f3850
WN
3517 }
3518#endif
3519
3c2a0909
S
3520 /* handle queue ready interrupt */
3521 pending = mci_readl(host, SHA_CMD_IS);
3522 if (pending & QRDY_INT) {
3523 u32 qrdy = mci_readl(host, SHA_CMD_IE);
3524 if (qrdy & QRDY_INT_EN) {
3525 set_bit(EVENT_QUEUE_READY, &host->pending_events);
3526 qrdy &= ~QRDY_INT_EN;
3527 mci_writel(host, SHA_CMD_IE, qrdy);
3528 }
3529
3530 /* clear queue ready interrupt */
3531 mci_writel(host, SHA_CMD_IS, QRDY_INT);
3532
3533 if (test_bit(EVENT_QUEUE_READY, &host->pending_events))
3534 tasklet_schedule(&host->tasklet);
3535
3536 ret = IRQ_HANDLED;
3537 }
3538
3539 if (ret == IRQ_NONE)
3540 pr_warn_ratelimited("%s: no interrupts handled, pending %08x %08x\n",
3541 dev_name(host->dev),
3542 mci_readl(host, MINTSTS),
3543 mci_readl(host, IDSTS));
3544
3545 return ret;
3546}
3547
3548static void dw_mci_timeout_timer(unsigned long data)
3549{
3550 struct dw_mci *host = (struct dw_mci *)data;
3551 struct mmc_request *mrq;
3552
3553 if (host && host->mrq_dat) {
3554 mrq = host->mrq_dat;
3555
3556 dev_err(host->dev,
3557 "Timeout waiting for hardware interrupt."
3558 " state = %d\n", host->state_dat);
3559 dw_mci_reg_dump(host);
3560
3561 spin_lock(&host->lock);
3562
3563 host->sg = NULL;
3564 host->data = NULL;
3565 host->cmd = NULL;
3566
3567 switch (host->state_dat) {
3568 case STATE_IDLE:
3569 break;
3570 case STATE_SENDING_CMD:
3571 mrq->cmd->error = -ENOMEDIUM;
3572 if (!mrq->data)
3573 break;
3574 /* fall through */
3575 case STATE_SENDING_DATA:
3576 mrq->data->error = -ENOMEDIUM;
3577 dw_mci_stop_dma(host);
3578 break;
3579 case STATE_DATA_BUSY:
3580 case STATE_DATA_ERROR:
3581 if (mrq->data->error == -EINPROGRESS)
3582 mrq->data->error = -ENOMEDIUM;
3583 /* fall through */
3584 case STATE_SENDING_STOP:
3585 if (mrq->stop)
3586 mrq->stop->error = -ENOMEDIUM;
3587 break;
3588 }
3589
3590 spin_unlock(&host->lock);
3591 dw_mci_fifo_reset(host->dev, host);
3592 dw_mci_ciu_reset(host->dev, host);
3593 spin_lock(&host->lock);
3594
3595 dw_mci_request_end(host, mrq, &host->state_dat);
3596 host->state_cmd = host->state_dat = STATE_IDLE;
3597 spin_unlock(&host->lock);
3598 }
3599}
3600
3601static void dw_mci_tp_mon(struct work_struct *work)
3602{
3603 struct dw_mci *host = container_of(work, struct dw_mci, tp_mon.work);
3604 struct dw_mci_mon_table *tp_tbl = host->pdata->tp_mon_tbl;
3605 s32 mif_lock_value = 0;
3606 s32 cpu_lock_value = 0;
3607#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
3608 s32 kfc_lock_value = 0;
3609#endif
3610
3611 while (tp_tbl->range) {
3612 if (host->transferred_cnt > tp_tbl->range)
3613 break;
3614 tp_tbl++;
3615 }
3616
3617 mif_lock_value = tp_tbl->mif_lock_value;
3618 cpu_lock_value = tp_tbl->cpu_lock_value;
3619#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
3620 kfc_lock_value = tp_tbl->kfc_lock_value;
3621#endif
3622
3623#ifndef CONFIG_ARM_EXYNOS_MP_CPUFREQ
3624 dev_dbg(host->dev, "%d byte/s cnt=%d mif=%d cpu=%d\n",
3625 host->transferred_cnt,
3626 host->cmd_cnt,
3627 mif_lock_value,
3628 cpu_lock_value);
3629#else
3630 dev_dbg(host->dev, "%d byte/s cnt=%d mif=%d cpu=%d kfc=%d\n",
3631 host->transferred_cnt,
3632 host->cmd_cnt,
3633 mif_lock_value,
3634 cpu_lock_value,
3635 kfc_lock_value);
3636#endif
3637
3638 pm_qos_update_request_timeout(&host->pm_qos_mif,
3639 mif_lock_value, 2000000);
3640#if defined(CONFIG_ARM_EXYNOS_MP_CPUFREQ)
3641 pm_qos_update_request_timeout(&host->pm_qos_cluster0,
3642 kfc_lock_value, 2000000);
3643#elif defined(CONFIG_ARM_EXYNOS_SMP_CPUFREQ)
3644 pm_qos_update_request_timeout(&host->pm_qos_cluster0,
3645 cpu_lock_value, 2000000);
3646#else
3647 pm_qos_update_request_timeout(&host->pm_qos_cluster1,
3648 cpu_lock_value, 2000000);
3649#endif
3650
3651 host->transferred_cnt = 0;
3652 host->cmd_cnt = 0;
3653 schedule_delayed_work(&host->tp_mon, HZ);
f95f3850
WN
3654}
3655
1791b13e 3656static void dw_mci_work_routine_card(struct work_struct *work)
f95f3850 3657{
1791b13e 3658 struct dw_mci *host = container_of(work, struct dw_mci, card_work);
3c2a0909 3659 const struct dw_mci_drv_data *drv_data = host->drv_data;
f95f3850
WN
3660 int i;
3661
3c2a0909
S
3662 if (drv_data && drv_data->misc_control) {
3663 int ret = 0;
3664 u32 ctrl_window = DW_MCI_TOGGLE_BLOCK_CD;
3665
3666 /*
3667 * Disable blocking card detection
3668 */
3669 ret = drv_data->misc_control(host, CTRL_TOGGLE_WINDOWS,
3670 &ctrl_window);
3671 BUG_ON(ret == -1);
3672 }
3673
f95f3850
WN
3674 for (i = 0; i < host->num_slots; i++) {
3675 struct dw_mci_slot *slot = host->slot[i];
3676 struct mmc_host *mmc = slot->mmc;
3677 struct mmc_request *mrq;
3678 int present;
f95f3850
WN
3679
3680 present = dw_mci_get_cd(mmc);
3681 while (present != slot->last_detect_state) {
3c2a0909 3682 dev_info(&slot->mmc->class_dev, "card %s\n",
f95f3850
WN
3683 present ? "inserted" : "removed");
3684
3c2a0909
S
3685 /* Power up slot (before spin_lock, may sleep) */
3686 if (present != 0 && host->pdata->setpower)
3687 host->pdata->setpower(slot->id, mmc->ocr_avail);
3688
1791b13e
JH
3689 spin_lock_bh(&host->lock);
3690
f95f3850
WN
3691 /* Card change detected */
3692 slot->last_detect_state = present;
3693
1791b13e
JH
3694 /* Mark card as present if applicable */
3695 if (present != 0)
f95f3850 3696 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
f95f3850
WN
3697
3698 /* Clean up queue if present */
3699 mrq = slot->mrq;
3700 if (mrq) {
3c2a0909
S
3701 enum dw_mci_state *state = NULL;
3702 if (mrq == host->mrq_cmd)
3703 state = &host->state_cmd;
3704 else if (mrq == host->mrq_dat)
3705 state = &host->state_dat;
3706 if (state) {
f95f3850
WN
3707 host->data = NULL;
3708 host->cmd = NULL;
3709
3c2a0909 3710 switch (*state) {
f95f3850
WN
3711 case STATE_IDLE:
3712 break;
3713 case STATE_SENDING_CMD:
3714 mrq->cmd->error = -ENOMEDIUM;
3715 if (!mrq->data)
3716 break;
3717 /* fall through */
3718 case STATE_SENDING_DATA:
3719 mrq->data->error = -ENOMEDIUM;
3720 dw_mci_stop_dma(host);
3721 break;
3722 case STATE_DATA_BUSY:
3723 case STATE_DATA_ERROR:
3724 if (mrq->data->error == -EINPROGRESS)
3725 mrq->data->error = -ENOMEDIUM;
f95f3850
WN
3726 /* fall through */
3727 case STATE_SENDING_STOP:
3c2a0909
S
3728 if (mrq->stop)
3729 mrq->stop->error = -ENOMEDIUM;
f95f3850
WN
3730 break;
3731 }
3732
3c2a0909
S
3733 dw_mci_request_end(host, mrq, state);
3734 slot->mrq = NULL;
f95f3850
WN
3735 } else {
3736 list_del(&slot->queue_node);
3737 mrq->cmd->error = -ENOMEDIUM;
3738 if (mrq->data)
3739 mrq->data->error = -ENOMEDIUM;
3740 if (mrq->stop)
3741 mrq->stop->error = -ENOMEDIUM;
3742
3c2a0909 3743 del_timer(&host->timer);
f95f3850
WN
3744 spin_unlock(&host->lock);
3745 mmc_request_done(slot->mmc, mrq);
3746 spin_lock(&host->lock);
3747 }
3748 }
3749
3750 /* Power down slot */
3c2a0909
S
3751 if (host->pdata->use_biu_gate_clock)
3752 atomic_inc_return(&host->biu_en_win);
3753 dw_mci_biu_clk_en(host, false);
3754 if (host->pdata->use_gate_clock)
3755 atomic_inc_return(&host->ciu_en_win);
3756 spin_unlock_bh(&host->lock);
3757 dw_mci_ciu_clk_en(host, false);
3758 spin_lock_bh(&host->lock);
f95f3850 3759 if (present == 0) {
f95f3850
WN
3760 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
3761
3762 /*
3763 * Clear down the FIFO - doing so generates a
3764 * block interrupt, hence setting the
3765 * scatter-gather pointer to NULL.
3766 */
f9c2a0dc 3767 sg_miter_stop(&host->sg_miter);
f95f3850 3768 host->sg = NULL;
3c2a0909
S
3769 dw_mci_ciu_reset(host->dev, host);
3770 dw_mci_fifo_reset(host->dev, host);
f95f3850 3771#ifdef CONFIG_MMC_DW_IDMAC
3c2a0909 3772 dw_mci_idma_reset_dma(host);
f95f3850 3773#endif
3c2a0909
S
3774 } else if (host->cur_slot) {
3775 dw_mci_ciu_reset(host->dev, host);
3776 mci_writel(host, RINTSTS, 0xFFFFFFFF);
f95f3850 3777 }
3c2a0909
S
3778 if (host->pdata->use_gate_clock)
3779 atomic_dec_return(&host->ciu_en_win);
3780 if (host->pdata->use_biu_gate_clock)
3781 atomic_dec_return(&host->biu_en_win);
f95f3850 3782
1791b13e
JH
3783 spin_unlock_bh(&host->lock);
3784
3c2a0909
S
3785 /* Power down slot (after spin_unlock, may sleep) */
3786 if (present == 0 && host->pdata->setpower)
3787 host->pdata->setpower(slot->id, 0);
3788
f95f3850
WN
3789 present = dw_mci_get_cd(mmc);
3790 }
3c2a0909
S
3791
3792 mmc_detect_change(slot->mmc,
3793 msecs_to_jiffies(host->pdata->detect_delay_ms));
3794 }
3795}
3796
3797#if defined(CONFIG_BCM43455) || defined(CONFIG_BCM43455_MODULE) || \
3798 defined(CONFIG_BCM4343) || defined (CONFIG_BCM4343_MODULE) || \
3799 defined(CONFIG_BCM43454) || defined (CONFIG_BCM43454_MODULE)
3800static void dw_mci_notify_change(void *dev, int state)
3801{
3802 struct dw_mci *host = (struct dw_mci *)dev;
3803 unsigned long flags;
3804
3805 if (host) {
3806 spin_lock_irqsave(&host->lock, flags);
3807 if (state) {
3808 printk(KERN_ERR "card inserted\n");
3809 host->pdata->quirks |= DW_MCI_QUIRK_BROKEN_CARD_DETECTION;
3810 } else {
3811 printk(KERN_ERR "card removed\n");
3812 host->pdata->quirks &= ~DW_MCI_QUIRK_BROKEN_CARD_DETECTION;
3813 }
3814 queue_work(host->card_workqueue, &host->card_work);
3815 spin_unlock_irqrestore(&host->lock, flags);
3816 }
3817}
3818#else /* CONFIG_BCM43455 || CONFIG_BCM43455_MODULE */
3819static void dw_mci_notify_change(struct platform_device *dev, int state)
3820{
3821 struct dw_mci *host = platform_get_drvdata(dev);
3822 unsigned long flags;
3823
3824 if (host) {
3825 spin_lock_irqsave(&host->lock, flags);
3826 if (state) {
3827 dev_dbg(&dev->dev, "card inserted.\n");
3828 host->pdata->quirks |= DW_MCI_QUIRK_BROKEN_CARD_DETECTION;
3829 } else {
3830 dev_dbg(&dev->dev, "card removed.\n");
3831 host->pdata->quirks &= ~DW_MCI_QUIRK_BROKEN_CARD_DETECTION;
3832 }
3833 queue_work(host->card_workqueue, &host->card_work);
3834 spin_unlock_irqrestore(&host->lock, flags);
f95f3850
WN
3835 }
3836}
3c2a0909 3837#endif /* CONFIG_BCM43455 || CONFIG_BCM43455_MODULE */
f95f3850 3838
c91eab4b
TA
3839#ifdef CONFIG_OF
3840/* given a slot id, find out the device node representing that slot */
3841static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3842{
3843 struct device_node *np;
3844 const __be32 *addr;
3845 int len;
3846
3847 if (!dev || !dev->of_node)
3848 return NULL;
3849
3850 for_each_child_of_node(dev->of_node, np) {
3851 addr = of_get_property(np, "reg", &len);
3852 if (!addr || (len < sizeof(int)))
3853 continue;
3854 if (be32_to_cpup(addr) == slot)
3855 return np;
3856 }
3857 return NULL;
3858}
3859
a70aaa64
DA
3860static struct dw_mci_of_slot_quirks {
3861 char *quirk;
3862 int id;
3863} of_slot_quirks[] = {
3864 {
3865 .quirk = "disable-wp",
3866 .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT,
3867 },
3868};
3869
3870static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3871{
3872 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3873 int quirks = 0;
3874 int idx;
3875
3876 /* get quirks */
3877 for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++)
3878 if (of_get_property(np, of_slot_quirks[idx].quirk, NULL))
3879 quirks |= of_slot_quirks[idx].id;
3880
3881 return quirks;
3882}
3883
c91eab4b
TA
3884/* find out bus-width for a given slot */
3885static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3886{
3887 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3888 u32 bus_wd = 1;
3889
3890 if (!np)
3891 return 1;
3892
3893 if (of_property_read_u32(np, "bus-width", &bus_wd))
3894 dev_err(dev, "bus-width property not found, assuming width"
3895 " as 1\n");
3896 return bus_wd;
3897}
55a6ceb2
DA
3898
3899/* find the write protect gpio for a given slot; or -1 if none specified */
3900static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3901{
3902 struct device_node *np = dw_mci_of_find_slot_node(dev, slot);
3903 int gpio;
3904
3905 if (!np)
3906 return -EINVAL;
3907
3c2a0909
S
3908 if (of_get_property(np, "wp-gpios", NULL))
3909 gpio = of_get_named_gpio(np, "wp-gpios", 0);
3910 else
3911 gpio = -1;
55a6ceb2
DA
3912
3913 /* Having a missing entry is valid; return silently */
3914 if (!gpio_is_valid(gpio))
3915 return -EINVAL;
3916
3917 if (devm_gpio_request(dev, gpio, "dw-mci-wp")) {
3918 dev_warn(dev, "gpio [%d] request failed\n", gpio);
3919 return -EINVAL;
3920 }
3921
3922 return gpio;
3923}
c91eab4b 3924#else /* CONFIG_OF */
a70aaa64
DA
3925static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot)
3926{
3927 return 0;
3928}
c91eab4b
TA
3929static u32 dw_mci_of_get_bus_wd(struct device *dev, u8 slot)
3930{
3931 return 1;
3932}
3933static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot)
3934{
3935 return NULL;
3936}
55a6ceb2
DA
3937static int dw_mci_of_get_wp_gpio(struct device *dev, u8 slot)
3938{
3939 return -EINVAL;
3940}
c91eab4b
TA
3941#endif /* CONFIG_OF */
3942
3c2a0909
S
3943static irqreturn_t dw_mci_detect_interrupt(int irq, void *dev_id)
3944{
3945 struct dw_mci *host = dev_id;
3946
3947 queue_work(host->card_workqueue, &host->card_work);
3948
3949 return IRQ_HANDLED;
3950}
3951
36c179a9 3952static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
f95f3850
WN
3953{
3954 struct mmc_host *mmc;
3955 struct dw_mci_slot *slot;
e95baf13 3956 const struct dw_mci_drv_data *drv_data = host->drv_data;
800d78bf 3957 int ctrl_id, ret;
c91eab4b 3958 u8 bus_width;
f95f3850 3959
4a90920c 3960 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
f95f3850
WN
3961 if (!mmc)
3962 return -ENOMEM;
3963
3964 slot = mmc_priv(mmc);
3965 slot->id = id;
3c2a0909
S
3966#ifdef CONFIG_MMC_CLKGATE
3967 mmc->clkgate_delay = 10;
3968#endif
f95f3850
WN
3969 slot->mmc = mmc;
3970 slot->host = host;
c91eab4b 3971 host->slot[id] = slot;
f95f3850 3972
a70aaa64
DA
3973 slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id);
3974
f95f3850
WN
3975 mmc->ops = &dw_mci_ops;
3976 mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
3977 mmc->f_max = host->bus_hz;
3978
3979 if (host->pdata->get_ocr)
3980 mmc->ocr_avail = host->pdata->get_ocr(id);
3981 else
3982 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
3983
3984 /*
3985 * Start with slot power disabled, it will be enabled when a card
3986 * is detected.
3987 */
3988 if (host->pdata->setpower)
3989 host->pdata->setpower(id, 0);
3990
fc3d7720
JC
3991 if (host->pdata->caps)
3992 mmc->caps = host->pdata->caps;
fc3d7720 3993
800d78bf
TA
3994 if (host->dev->of_node) {
3995 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
3996 if (ctrl_id < 0)
3997 ctrl_id = 0;
3998 } else {
3999 ctrl_id = to_platform_device(host->dev)->id;
4000 }
cb27a843
JH
4001 if (drv_data && drv_data->caps)
4002 mmc->caps |= drv_data->caps[ctrl_id];
800d78bf 4003
4f408cc6
SJ
4004 if (host->pdata->caps2)
4005 mmc->caps2 = host->pdata->caps2;
4f408cc6 4006
3c2a0909
S
4007 if (host->pdata->pm_caps) {
4008 mmc->pm_caps = host->pdata->pm_caps;
4009 mmc->pm_flags = mmc->pm_caps;
4010 }
4011
4012 if (host->pdata->dev_drv_str)
4013 mmc->dev_drv_str = host->pdata->dev_drv_str;
4014 else
4015 mmc->dev_drv_str = MMC_DRIVER_TYPE_0;
4016
f95f3850 4017 if (host->pdata->get_bus_wd)
c91eab4b
TA
4018 bus_width = host->pdata->get_bus_wd(slot->id);
4019 else if (host->dev->of_node)
4020 bus_width = dw_mci_of_get_bus_wd(host->dev, slot->id);
4021 else
4022 bus_width = 1;
4023
4024 switch (bus_width) {
4025 case 8:
4026 mmc->caps |= MMC_CAP_8_BIT_DATA;
4027 case 4:
4028 mmc->caps |= MMC_CAP_4_BIT_DATA;
4029 }
f95f3850
WN
4030
4031 if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
6daa7778 4032 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
f95f3850 4033
f95f3850
WN
4034 if (host->pdata->blk_settings) {
4035 mmc->max_segs = host->pdata->blk_settings->max_segs;
4036 mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
4037 mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
4038 mmc->max_req_size = host->pdata->blk_settings->max_req_size;
4039 mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
4040 } else {
4041 /* Useful defaults if platform data is unset. */
a39e5746
JC
4042#ifdef CONFIG_MMC_DW_IDMAC
4043 mmc->max_segs = host->ring_size;
4044 mmc->max_blk_size = 65536;
a39e5746 4045 mmc->max_seg_size = 0x1000;
3c2a0909
S
4046 mmc->max_req_size = mmc->max_seg_size * host->ring_size;
4047 mmc->max_blk_count = mmc->max_req_size / 512;
a39e5746 4048#else
f95f3850
WN
4049 mmc->max_segs = 64;
4050 mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
4051 mmc->max_blk_count = 512;
4052 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
4053 mmc->max_seg_size = mmc->max_req_size;
f95f3850 4054#endif /* CONFIG_MMC_DW_IDMAC */
a39e5746 4055 }
f95f3850 4056
3c2a0909
S
4057 if (host->align_size)
4058 mmc->align_size = host->align_size;
4059
4060 if (!(host->quirks & DW_MMC_QUIRK_FIXED_VOLTAGE)) {
4061 host->vmmc = devm_regulator_get(mmc_dev(mmc), "vmmc");
4062 if (IS_ERR(host->vmmc)) {
4063 pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
4064 host->vmmc = NULL;
4065 } else {
4066 /*
4067 * UHS card should report if switching to 1.8V request
4068 * is supported in card initialization, but starting
4069 * the initialization with 1.8V level the card would
4070 * not report that it doesn't support, that means S18R
4071 * bit is zero. So, when host initialization, SD card
4072 * power supply should be turned off once before card
4073 * initialization
4074 */
4075 if(!(regulator_enable(host->vmmc))){
4076 ret = regulator_disable(host->vmmc);
4077 if(ret){
4078 dev_err(host->dev,
4079 "failed to disable vmmc regulator: %d\n", ret);
4080 }
4081 }
4082 pr_info("%s: vmmc regulator found\n", mmc_hostname(mmc));
4083 ret = regulator_enable(host->vmmc);
4084 if (ret) {
4085 dev_err(host->dev,
4086 "failed to enable vmmc regulator: %d\n", ret);
4087 goto err_setup_bus;
4088 }
4089 }
4090
4091 host->vqmmc = devm_regulator_get(mmc_dev(mmc), "vqmmc");
4092 if (IS_ERR(host->vqmmc)) {
4093 pr_info("%s: no vqmmc regulator found\n", mmc_hostname(mmc));
4094 host->vqmmc = NULL;
4095 } else {
4096 pr_info("%s: vqmmc regulator found\n", mmc_hostname(mmc));
4097 ret = regulator_enable(host->vqmmc);
4098 if (ret) {
4099 dev_err(host->dev,
4100 "failed to enable vqmmc regulator: %d\n", ret);
4101 goto err_setup_bus;
4102 }
f2f942ce
SK
4103 }
4104 }
c07946a3 4105
3c2a0909
S
4106 if (host->pdata->init)
4107 host->pdata->init(id, dw_mci_detect_interrupt, host);
4108
f95f3850
WN
4109 if (dw_mci_get_cd(mmc))
4110 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
4111 else
4112 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
4113
55a6ceb2
DA
4114 slot->wp_gpio = dw_mci_of_get_wp_gpio(host->dev, slot->id);
4115
0cea529d
JC
4116 ret = mmc_add_host(mmc);
4117 if (ret)
4118 goto err_setup_bus;
f95f3850 4119
3c2a0909
S
4120 INIT_LIST_HEAD(&slot->mrq_list);
4121
f95f3850
WN
4122#if defined(CONFIG_DEBUG_FS)
4123 dw_mci_init_debugfs(slot);
4124#endif
4125
4126 /* Card initially undetected */
4127 slot->last_detect_state = 0;
4128
3c2a0909
S
4129#if defined(CONFIG_BCM43455) || defined(CONFIG_BCM43455_MODULE)|| \
4130 defined(CONFIG_BCM4343) || defined(CONFIG_BCM4343_MODULE) || \
4131 defined(CONFIG_BCM43454) || defined(CONFIG_BCM43454_MODULE)
4132 if (host->pdata->cd_type == DW_MCI_CD_EXTERNAL) {
4133 printk("%s, set DW_MCI_CD_EXTERNAL \n",mmc_hostname(mmc));
4134 host->pdata->ext_cd_init(&dw_mci_notify_change, (void*)host, mmc);
4135 }
4136#else /* CONFIG_BCM43455 || CONFIG_BCM43455_MODULE */
4137 if (host->pdata->cd_type == DW_MCI_CD_EXTERNAL)
4138 host->pdata->ext_cd_init(&dw_mci_notify_change);
4139#endif /* CONFIG_BCM43455 || CONFIG_BCM43455_MODULE */
dd6c4b98
WN
4140 /*
4141 * Card may have been plugged in prior to boot so we
4142 * need to run the detect tasklet
4143 */
95dcc2cb 4144 queue_work(host->card_workqueue, &host->card_work);
dd6c4b98 4145
f95f3850 4146 return 0;
800d78bf
TA
4147
4148err_setup_bus:
4149 mmc_free_host(mmc);
4150 return -EINVAL;
f95f3850
WN
4151}
4152
4153static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
4154{
4155 /* Shutdown detect IRQ */
4156 if (slot->host->pdata->exit)
4157 slot->host->pdata->exit(id);
4158
4159 /* Debugfs stuff is cleaned up by mmc core */
4160 mmc_remove_host(slot->mmc);
4161 slot->host->slot[id] = NULL;
4162 mmc_free_host(slot->mmc);
4163}
4164
4165static void dw_mci_init_dma(struct dw_mci *host)
4166{
3c2a0909
S
4167 if (host->pdata->desc_sz)
4168 host->desc_sz = host->pdata->desc_sz;
4169 else
4170 host->desc_sz = 1;
4171
f95f3850 4172 /* Alloc memory for sg translation */
3c2a0909
S
4173 host->sg_cpu = dmam_alloc_coherent(host->dev,
4174 host->desc_sz * PAGE_SIZE * MMC_DW_IDMAC_MULTIPLIER,
4175 &host->sg_dma, GFP_KERNEL);
f95f3850 4176 if (!host->sg_cpu) {
4a90920c 4177 dev_err(host->dev, "%s: could not alloc DMA memory\n",
f95f3850
WN
4178 __func__);
4179 goto no_dma;
4180 }
4181
4182 /* Determine which DMA interface to use */
4183#ifdef CONFIG_MMC_DW_IDMAC
4184 host->dma_ops = &dw_mci_idmac_ops;
00956ea3 4185 dev_info(host->dev, "Using internal DMA controller.\n");
f95f3850
WN
4186#endif
4187
4188 if (!host->dma_ops)
4189 goto no_dma;
4190
e1631f98
JC
4191 if (host->dma_ops->init && host->dma_ops->start &&
4192 host->dma_ops->stop && host->dma_ops->cleanup) {
f95f3850 4193 if (host->dma_ops->init(host)) {
4a90920c 4194 dev_err(host->dev, "%s: Unable to initialize "
f95f3850
WN
4195 "DMA Controller.\n", __func__);
4196 goto no_dma;
4197 }
4198 } else {
4a90920c 4199 dev_err(host->dev, "DMA initialization not found.\n");
f95f3850
WN
4200 goto no_dma;
4201 }
4202
4203 host->use_dma = 1;
4204 return;
4205
4206no_dma:
4a90920c 4207 dev_info(host->dev, "Using PIO mode.\n");
f95f3850
WN
4208 host->use_dma = 0;
4209 return;
4210}
4211
4212static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
4213{
4214 unsigned long timeout = jiffies + msecs_to_jiffies(500);
4215 unsigned int ctrl;
4216
4217 mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
4218 SDMMC_CTRL_DMA_RESET));
4219
4220 /* wait till resets clear */
4221 do {
4222 ctrl = mci_readl(host, CTRL);
4223 if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
4224 SDMMC_CTRL_DMA_RESET)))
4225 return true;
4226 } while (time_before(jiffies, timeout));
4227
4228 dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
4229
4230 return false;
4231}
4232
3c2a0909
S
4233#define REGISTER_NOTI 0
4234#define UNREGISTER_NOTI 1
4235
4236static void dw_mci_register_notifier(struct dw_mci *host, u32 reg_noti)
4237{
4238 const struct dw_mci_drv_data *drv_data = host->drv_data;
4239
4240 if (reg_noti == REGISTER_NOTI) {
4241 if (drv_data && drv_data->register_notifier)
4242 drv_data->register_notifier(host);
4243 } else if (reg_noti == UNREGISTER_NOTI) {
4244 if (drv_data && drv_data->unregister_notifier)
4245 drv_data->unregister_notifier(host);
4246 }
4247}
4248
c91eab4b
TA
4249#ifdef CONFIG_OF
4250static struct dw_mci_of_quirks {
4251 char *quirk;
4252 int id;
4253} of_quirks[] = {
4254 {
4255 .quirk = "supports-highspeed",
4256 .id = DW_MCI_QUIRK_HIGHSPEED,
4257 }, {
4258 .quirk = "broken-cd",
4259 .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
3c2a0909
S
4260 }, {
4261 .quirk = "bypass-smu",
4262 .id = DW_MCI_QUIRK_BYPASS_SMU,
4263 }, {
4264 .quirk = "fixed_volt",
4265 .id = DW_MMC_QUIRK_FIXED_VOLTAGE,
4266 }, {
4267 .quirk = "sw_data_timeout",
4268 .id = DW_MMC_QUIRK_SW_DATA_TIMEOUT,
4269 }, {
4270 .quirk = "error-retry",
4271 .id = DW_MMC_QUIRK_RETRY_ERROR,
4272 }, {
4273 .quirk = "use-cpu-mode-tuning",
4274 .id = DW_MMC_QUIRK_USE_CPU_MODE_TUNING,
4275 }, {
4276 .quirk = "fix-fmp-size-mismatch",
4277 .id = DW_MMC_QUIRK_FMP_SIZE_MISMATCH,
4278 }, {
4279 .quirk = "not-allow-single-dma",
4280 .id = DW_MMC_QUIRK_NOT_ALLOW_SINGLE_DMA,
4281 }, {
4282 .quirk = "use-smu",
4283 .id = DW_MCI_QUIRK_USE_SMU,
4284 }, {
4285 .quirk = "enable-ulp-mode",
4286 .id = DW_MCI_QUIRK_ENABLE_ULP,
c91eab4b
TA
4287 },
4288};
4289
3c2a0909
S
4290#if defined(CONFIG_BCM43455) || defined(CONFIG_BCM43455_MODULE)|| \
4291 defined(CONFIG_BCM4343) || defined(CONFIG_BCM4343_MODULE) || \
4292 defined(CONFIG_BCM43454) || defined(CONFIG_BCM43454_MODULE)
4293void (*notify_func_callback)(void *dev_id, int state);
4294void *mmc_host_dev = NULL;
4295static DEFINE_MUTEX(notify_mutex_lock);
4296struct mmc_host *wlan_mmc = NULL;
4297static int ext_cd_init_callback(
4298 void (*notify_func)(void *dev_id, int state), void *dev_id, struct mmc_host *mmc)
4299{
4300 printk("Enter %s\n",__FUNCTION__);
4301 mutex_lock(&notify_mutex_lock);
4302 WARN_ON(notify_func_callback);
4303 notify_func_callback = notify_func;
4304 mmc_host_dev = dev_id;
4305 wlan_mmc = mmc;
4306 mutex_unlock(&notify_mutex_lock);
4307
4308 return 0;
4309}
4310
4311static int ext_cd_cleanup_callback(
4312 void (*notify_func)(void *dev_id, int state), void *dev_id)
4313{
4314 printk("Enter %s\n",__FUNCTION__);
4315 mutex_lock(&notify_mutex_lock);
4316 WARN_ON(notify_func_callback);
4317 notify_func_callback = NULL;
4318 mmc_host_dev = NULL;
4319 mutex_unlock(&notify_mutex_lock);
4320
4321 return 0;
4322}
4323#endif /* CONFIG_BCM43455 || CONFIG_BCM43455_MODULE */
4324
c91eab4b
TA
4325static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
4326{
4327 struct dw_mci_board *pdata;
4328 struct device *dev = host->dev;
4329 struct device_node *np = dev->of_node;
e95baf13 4330 const struct dw_mci_drv_data *drv_data = host->drv_data;
3c2a0909
S
4331 int idx, i, ret;
4332 u32 clock_frequency, tp_mon_depth = 0, tp_table_size = 0;
4333 u32 *tp_mon_tbl;
c91eab4b
TA
4334
4335 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
4336 if (!pdata) {
4337 dev_err(dev, "could not allocate memory for pdata\n");
4338 return ERR_PTR(-ENOMEM);
4339 }
4340
4341 /* find out number of slots supported */
4342 if (of_property_read_u32(dev->of_node, "num-slots",
4343 &pdata->num_slots)) {
4344 dev_info(dev, "num-slots property not found, "
4345 "assuming 1 slot is available\n");
4346 pdata->num_slots = 1;
4347 }
4348
4349 /* get quirks */
4350 for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
4351 if (of_get_property(np, of_quirks[idx].quirk, NULL))
4352 pdata->quirks |= of_quirks[idx].id;
4353
4354 if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
4355 dev_info(dev, "fifo-depth property not found, using "
4356 "value of FIFOTH register as default\n");
4357
4358 of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
3c2a0909
S
4359 of_property_read_u32(np, "qos_int_level", &pdata->qos_int_level);
4360 of_property_read_u32(np, "device-driver", &pdata->dev_drv_str);
4361 of_property_read_u32(np, "sw-timeout", &pdata->sw_timeout);
4362 of_property_read_u32(np, "data-timeout", &pdata->data_timeout);
4363 of_property_read_u32(np, "desc-size", &pdata->desc_sz);
4364 of_property_read_u32(np, "tp_mon_depth", &tp_mon_depth);
4365 if (tp_mon_depth) {
4366 tp_mon_tbl = devm_kzalloc(dev, (sizeof(struct dw_mci_mon_table) * tp_mon_depth), GFP_KERNEL);
4367 if (!tp_mon_tbl) {
4368 dev_err(dev, "could not allocate memory for tp_mon_tbl\n");
4369 return ERR_PTR(-ENOMEM);
4370 }
4371 tp_table_size = (sizeof(struct dw_mci_mon_table) / sizeof(unsigned int));
4372 ret = of_property_read_u32_array(np, "tp_mon_table", tp_mon_tbl,
4373 tp_table_size * tp_mon_depth);
4374 if (ret == 0) {
4375 pdata->tp_mon_tbl = (struct dw_mci_mon_table *) tp_mon_tbl;
4376 for (i = 0; i < tp_mon_depth; i++) {
4377#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
4378 dev_info(dev, "TP table info LV %d\n", i);
4379 dev_info(dev, "Range: %d MIF: %d CPU: %d KFC: %d\n",
4380 pdata->tp_mon_tbl[i].range,
4381 pdata->tp_mon_tbl[i].mif_lock_value,
4382 pdata->tp_mon_tbl[i].cpu_lock_value,
4383 pdata->tp_mon_tbl[i].kfc_lock_value);
4384#else
4385 dev_info(dev, "TP table info LV %d\n", i);
4386 dev_info(dev, "Range: %d MIF: %d CPU: %d\n",
4387 pdata->tp_mon_tbl[i].range,
4388 pdata->tp_mon_tbl[i].mif_lock_value,
4389 pdata->tp_mon_tbl[i].cpu_lock_value);
4390#endif
4391 }
4392 }
4393 }
4394
4395 if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
4396 pdata->bus_hz = clock_frequency;
c91eab4b 4397
cb27a843
JH
4398 if (drv_data && drv_data->parse_dt) {
4399 ret = drv_data->parse_dt(host);
800d78bf
TA
4400 if (ret)
4401 return ERR_PTR(ret);
4402 }
4403
3c2a0909
S
4404 /* caps */
4405 if (of_find_property(np, "caps-control", NULL)) {
4406 if (of_find_property(np, "supports-ddr50", NULL))
4407 pdata->caps = MMC_CAP_UHS_DDR50;
4408
4409 if (of_find_property(np, "supports-1-8v-ddr", NULL))
4410 pdata->caps |= MMC_CAP_1_8V_DDR;
4411
4412 if (of_find_property(np, "supports-8-bit", NULL))
4413 pdata->caps |= MMC_CAP_8_BIT_DATA;
4414
4415 if (of_find_property(np, "supports-cmd23", NULL))
4416 pdata->caps |= MMC_CAP_CMD23;
4417
4418 if (of_find_property(np, "supports-sdr104-mode", NULL))
4419 pdata->caps |= MMC_CAP_UHS_SDR104;
4420
4421 if (of_find_property(np, "supports-erase", NULL))
4422 pdata->caps |= MMC_CAP_ERASE;
4423
4424 } else if (drv_data && drv_data->misc_control)
4425 pdata->caps = drv_data->misc_control(host,
4426 CTRL_SET_DEF_CAPS, NULL);
4427
4428 if (of_find_property(np, "supports-sdr50-mode", NULL))
4429 pdata->caps |= MMC_CAP_UHS_SDR50;
4430
4431 if (of_find_property(np, "supports-sdio-irq", NULL))
4432 pdata->caps |= MMC_CAP_SDIO_IRQ;
4433
4434 /* caps2 */
4435 if (of_find_property(np, "extra_tuning", NULL))
4436 pdata->extra_tuning = true;
4437
4438 if (of_find_property(np, "only_once_tune", NULL))
4439 pdata->only_once_tune = true;
4440
ab269128
AK
4441 if (of_find_property(np, "keep-power-in-suspend", NULL))
4442 pdata->pm_caps |= MMC_PM_KEEP_POWER;
4443
4444 if (of_find_property(np, "enable-sdio-wakeup", NULL))
4445 pdata->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
4446
3c2a0909
S
4447 if (of_find_property(np, "enable-cache-control", NULL))
4448 pdata->caps2 |= MMC_CAP2_CACHE_CTRL;
4449
4450 if (of_find_property(np, "supports-poweroff-notification", NULL))
4451 pdata->caps2 |= MMC_CAP2_POWEROFF_NOTIFY;
4452
4453 if (of_find_property(np, "enable-no-sleep-cmd", NULL))
4454 pdata->caps2 |= MMC_CAP2_NO_SLEEP_CMD;
4455
4456 if (of_find_property(np, "supports-hs200-1-8v-mode", NULL))
4457 pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
4458
4459 if (of_find_property(np, "supports-hs200-1-2v-mode", NULL))
4460 pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
4461
4462 if (of_find_property(np, "supports-hs200-mode", NULL))
4463 pdata->caps2 |= MMC_CAP2_HS200;
4464
4465 if (of_find_property(np, "supports-ddr200-1-8v-mode", NULL))
4466 pdata->caps2 |= MMC_CAP2_HS200_1_8V_DDR;
4467
4468 if (of_find_property(np, "supports-ddr200-1-2v-mode", NULL))
4469 pdata->caps2 |= MMC_CAP2_HS200_1_2V_DDR;
4470
4471 if (of_find_property(np, "supports-ddr200-mode", NULL))
4472 pdata->caps2 |= MMC_CAP2_HS200_DDR;
4473
4474 if (of_find_property(np, "supports-ddr200-enhanced-strobe", NULL))
4475 pdata->caps2 |= MMC_CAP2_STROBE_ENHANCED;
4476
4477 if (of_find_property(np, "use-broken-voltage", NULL))
4478 pdata->caps2 |= MMC_CAP2_BROKEN_VOLTAGE;
4479
4480 if (of_find_property(np, "enable-packed-rd", NULL))
4481 pdata->caps2 |= MMC_CAP2_PACKED_RD;
4482
4483 if (of_find_property(np, "enable-packed-wr", NULL))
4484 pdata->caps2 |= MMC_CAP2_PACKED_WR;
4485
4486 if (of_find_property(np, "enable-packed-CMD", NULL))
4487 pdata->caps2 |= MMC_CAP2_PACKED_CMD;
4488
4489 if (of_find_property(np, "enable-cmdq", NULL))
4490 pdata->caps2 |= MMC_CAP2_CMDQ;
4491
4492 if (of_find_property(np, "clock-gate", NULL))
4493 pdata->use_gate_clock = true;
4494
4495 if (of_find_property(np, "biu-clock-gate", NULL))
4496 pdata->use_biu_gate_clock = true;
4497
4498 if (of_find_property(np, "enable-cclk-on-suspend", NULL))
4499 pdata->enable_cclk_on_suspend = true;
4500
4501 if (of_property_read_u32(dev->of_node, "cd-type",
4502 &pdata->cd_type))
4503 pdata->cd_type = DW_MCI_CD_PERMANENT;
4504
4505 if (of_find_property(np, "cd-type-gpio", NULL)) {
4506 pdata->cd_type = DW_MCI_CD_GPIO;
4507 pdata->caps2 |= MMC_CAP2_DETECT_ON_ERR;
4508 }
4509
4510#if defined(CONFIG_BCM43455) || defined(CONFIG_BCM43455_MODULE)|| \
4511 defined(CONFIG_BCM4343) || defined(CONFIG_BCM4343_MODULE) || \
4512 defined(CONFIG_BCM43454) || defined(CONFIG_BCM43454_MODULE)
4513 if (of_find_property(np, "pm-ignore-notify", NULL))
4514 pdata->pm_caps |= MMC_PM_IGNORE_PM_NOTIFY;
4515
4516 if (of_find_property(np, "cd-type-external", NULL)) {
4517 pdata->cd_type = DW_MCI_CD_EXTERNAL;
4518 pdata->ext_cd_init = ext_cd_init_callback;
4519 pdata->ext_cd_cleanup = ext_cd_cleanup_callback;
4520 }
4521#endif /* CONDIG_BCM43455 || CONFIG_BCM43455_MODULE */
4522
c91eab4b
TA
4523 return pdata;
4524}
4525
4526#else /* CONFIG_OF */
4527static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
4528{
4529 return ERR_PTR(-EINVAL);
4530}
4531#endif /* CONFIG_OF */
4532
62ca8034 4533int dw_mci_probe(struct dw_mci *host)
f95f3850 4534{
e95baf13 4535 const struct dw_mci_drv_data *drv_data = host->drv_data;
62ca8034 4536 int width, i, ret = 0;
3c2a0909 4537 u32 fifo_size, qrdy_int, msize, tx_wmark, rx_wmark;
1c2215b7 4538 int init_slots = 0;
f95f3850 4539
3c2a0909
S
4540 if (drv_data && drv_data->misc_control) {
4541 ret = drv_data->misc_control(host, CTRL_INIT_CLOCK, NULL);
4542 if (ret)
4543 return ret;
4544 }
4545
4546 if (drv_data && drv_data->misc_control) {
4547 /*
4548 * Enable blocking card detection
4549 * because I/O process should not be overlapped with
4550 * initial card detection function execution
4551 * of non-removable device. If so, SW timeout may happen.
4552 */
4553 u32 ctrl_window = DW_MCI_TOGGLE_BLOCK_CD |
4554 DW_MCI_TOGGLE_ENABLE;
4555
4556 ret = drv_data->misc_control(host, CTRL_TOGGLE_WINDOWS,
4557 &ctrl_window);
4558 if (ret == -1)
4559 return ret;
4560
4561 ret = drv_data->misc_control(host, CTRL_TURN_ON_2_8V, NULL);
4562 if (ret)
4563 return ret;
4564 }
4565
c91eab4b
TA
4566 if (!host->pdata) {
4567 host->pdata = dw_mci_parse_dt(host);
4568 if (IS_ERR(host->pdata)) {
4569 dev_err(host->dev, "platform data not available\n");
4570 return -EINVAL;
4571 }
f95f3850
WN
4572 }
4573
62ca8034 4574 if (!host->pdata->select_slot && host->pdata->num_slots > 1) {
4a90920c 4575 dev_err(host->dev,
f95f3850 4576 "Platform data must supply select_slot function\n");
62ca8034 4577 return -ENODEV;
f95f3850
WN
4578 }
4579
3c2a0909
S
4580 /*
4581 * Get clock sources
4582 */
780f22af 4583 host->biu_clk = devm_clk_get(host->dev, "biu");
f90a0612
TA
4584 if (IS_ERR(host->biu_clk)) {
4585 dev_dbg(host->dev, "biu clock not available\n");
f90a0612 4586 }
3c2a0909 4587 host->ciu_clk = devm_clk_get(host->dev, "gate_ciu");
f90a0612
TA
4588 if (IS_ERR(host->ciu_clk)) {
4589 dev_dbg(host->dev, "ciu clock not available\n");
3c2a0909 4590 host->bus_hz = host->pdata->bus_hz;
f90a0612 4591 }
3c2a0909
S
4592 host->gate_clk = devm_clk_get(host->dev, "gate_mmc");
4593 if (IS_ERR(host->gate_clk))
4594 dev_dbg(host->dev, "clock for gating not available\n");
f90a0612 4595
3c2a0909
S
4596 /*
4597 * BIU clock enable
4598 */
4599 ret = dw_mci_biu_clk_en(host, true);
4600 if (ret) {
4601 dev_err(host->dev, "failed to enable biu clock\n");
4602 goto err_clk_biu;
4603 }
4604
4605 /*
4606 * CIU clock enable
4607 */
4608 ret = dw_mci_ciu_clk_en(host, true);
4609 if (ret) {
4610 goto err_clk_ciu;
4611 } else {
4612 if (host->pdata->bus_hz) {
4613 ret = clk_set_rate(host->ciu_clk,
4614 host->pdata->bus_hz);
4615 if (ret)
4616 dev_warn(host->dev,
4617 "Unable to set bus rate to %ul\n",
4618 host->pdata->bus_hz);
4619 }
f90a0612 4620 host->bus_hz = clk_get_rate(host->ciu_clk);
3c2a0909 4621 }
f90a0612 4622
cb27a843
JH
4623 if (drv_data && drv_data->setup_clock) {
4624 ret = drv_data->setup_clock(host);
800d78bf
TA
4625 if (ret) {
4626 dev_err(host->dev,
4627 "implementation specific clock setup failed\n");
4628 goto err_clk_ciu;
4629 }
4630 }
4631
f90a0612 4632 if (!host->bus_hz) {
4a90920c 4633 dev_err(host->dev,
f95f3850 4634 "Platform data must supply bus speed\n");
f90a0612
TA
4635 ret = -ENODEV;
4636 goto err_clk_ciu;
f95f3850
WN
4637 }
4638
62ca8034 4639 host->quirks = host->pdata->quirks;
f95f3850
WN
4640
4641 spin_lock_init(&host->lock);
4642 INIT_LIST_HEAD(&host->queue);
4643
3c2a0909
S
4644 if (drv_data && drv_data->cfg_smu)
4645 drv_data->cfg_smu(host);
4646
f95f3850
WN
4647 /*
4648 * Get the host data width - this assumes that HCON has been set with
4649 * the correct values.
4650 */
4651 i = (mci_readl(host, HCON) >> 7) & 0x7;
4652 if (!i) {
4653 host->push_data = dw_mci_push_data16;
4654 host->pull_data = dw_mci_pull_data16;
4655 width = 16;
4656 host->data_shift = 1;
4657 } else if (i == 2) {
4658 host->push_data = dw_mci_push_data64;
4659 host->pull_data = dw_mci_pull_data64;
4660 width = 64;
4661 host->data_shift = 3;
4662 } else {
4663 /* Check for a reserved value, and warn if it is */
4664 WARN((i != 1),
4665 "HCON reports a reserved host data width!\n"
4666 "Defaulting to 32-bit access.\n");
4667 host->push_data = dw_mci_push_data32;
4668 host->pull_data = dw_mci_pull_data32;
4669 width = 32;
4670 host->data_shift = 2;
4671 }
4672
4673 /* Reset all blocks */
4a90920c 4674 if (!mci_wait_reset(host->dev, host))
141a712a
SJ
4675 return -ENODEV;
4676
3c2a0909
S
4677 if (!host->dev->dma_mask)
4678 dma_set_mask(host->dev, DMA_BIT_MASK(32));
4679 init_dma_attrs(&dw_mci_direct_attrs);
4680 dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &dw_mci_direct_attrs);
4681
141a712a
SJ
4682 host->dma_ops = host->pdata->dma_ops;
4683 dw_mci_init_dma(host);
f95f3850
WN
4684
4685 /* Clear the interrupts for the host controller */
4686 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4687 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4688
4689 /* Put in max timeout */
4690 mci_writel(host, TMOUT, 0xFFFFFFFF);
4691
b86d8253
JH
4692 if (!host->pdata->fifo_depth) {
4693 /*
4694 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
4695 * have been overwritten by the bootloader, just like we're
4696 * about to do, so if you know the value for your hardware, you
4697 * should put it in the platform data.
4698 */
4699 fifo_size = mci_readl(host, FIFOTH);
3c2a0909 4700 fifo_size = 1 + ((fifo_size >> SDMMC_FIFOTH_RX_WMARK) & 0xfff);
b86d8253
JH
4701 } else {
4702 fifo_size = host->pdata->fifo_depth;
4703 }
3c2a0909 4704
b86d8253 4705 host->fifo_depth = fifo_size;
3c2a0909
S
4706
4707 WARN_ON(fifo_size < 8);
4708
4709 /*
4710 * HCON[9:7] -> H_DATA_WIDTH
4711 * 000 16 bits
4712 * 001 32 bits
4713 * 010 64 bits
4714 *
4715 * FIFOTH[30:28] -> DW_DMA_Mutiple_Transaction_Size
4716 * msize:
4717 * 000 1 transfers
4718 * 001 4
4719 * 010 8
4720 * 011 16
4721 * 100 32
4722 * 101 64
4723 * 110 128
4724 * 111 256
4725 *
4726 * AHB Master can support 1/4/8/16 burst in DMA.
4727 * So, Max support burst spec is 16 burst.
4728 *
4729 * msize <= 011(16 burst)
4730 * Transaction_Size = msize * H_DATA_WIDTH;
4731 * rx_wmark = Transaction_Size - 1;
4732 * tx_wmark = fifo_size - Transaction_Size;
4733 */
4734 msize = host->data_shift;
4735 msize &= 7;
4736 rx_wmark = ((1 << (msize + 1)) - 1) & 0xfff;
4737 tx_wmark = (fifo_size - (1 << (msize + 1))) & 0xfff;
4738
4739 host->fifoth_val = msize << SDMMC_FIFOTH_DMA_MULTI_TRANS_SIZE;
4740 host->fifoth_val |= (rx_wmark << SDMMC_FIFOTH_RX_WMARK) | tx_wmark;
4741
e61cf118 4742 mci_writel(host, FIFOTH, host->fifoth_val);
f95f3850 4743
3c2a0909
S
4744 dev_info(host->dev, "FIFOTH: 0x %08x", mci_readl(host, FIFOTH));
4745
f95f3850
WN
4746 /* disable clock to CIU */
4747 mci_writel(host, CLKENA, 0);
4748 mci_writel(host, CLKSRC, 0);
4749
63008768
JH
4750 /*
4751 * In 2.40a spec, Data offset is changed.
4752 * Need to check the version-id and set data-offset for DATA register.
4753 */
4754 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
4755 dev_info(host->dev, "Version ID is %04x\n", host->verid);
4756
4757 if (host->verid < DW_MMC_240A)
4758 host->data_offset = DATA_OFFSET;
4759 else
4760 host->data_offset = DATA_240A_OFFSET;
4761
f95f3850 4762 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
3c2a0909 4763 host->tasklet_state = 0;
95dcc2cb 4764 host->card_workqueue = alloc_workqueue("dw-mci-card",
1791b13e 4765 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
95dcc2cb 4766 if (!host->card_workqueue)
1791b13e
JH
4767 goto err_dmaunmap;
4768 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
3c2a0909
S
4769
4770 pm_qos_add_request(&host->pm_qos_int, PM_QOS_DEVICE_THROUGHPUT, 0);
4771 if (host->pdata->tp_mon_tbl) {
4772 INIT_DELAYED_WORK(&host->tp_mon, dw_mci_tp_mon);
4773 pm_qos_add_request(&host->pm_qos_mif,
4774 PM_QOS_BUS_THROUGHPUT, 0);
4775#if defined(CONFIG_ARM_EXYNOS_MP_CPUFREQ)
4776 pm_qos_add_request(&host->pm_qos_cluster0,
4777 PM_QOS_CLUSTER0_FREQ_MIN, 0);
4778#elif defined(CONFIG_ARM_EXYNOS_SMP_CPUFREQ)
4779 pm_qos_add_request(&host->pm_qos_cluster0,
4780 PM_QOS_CLUSTER0_FREQ_MIN, 0);
4781#else
4782 pm_qos_add_request(&host->pm_qos_cluster1,
4783 PM_QOS_CLUSTER1_FREQ_MIN, 0);
4784#endif
4785 }
4786
780f22af
SJ
4787 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
4788 host->irq_flags, "dw-mci", host);
3c2a0909
S
4789
4790 setup_timer(&host->timer, dw_mci_timeout_timer, (unsigned long)host);
4791 setup_timer(&host->dto_timer, dw_mci_dto_timer, (unsigned long)host);
4792
f95f3850 4793 if (ret)
1791b13e 4794 goto err_workqueue;
f95f3850 4795
f95f3850
WN
4796 if (host->pdata->num_slots)
4797 host->num_slots = host->pdata->num_slots;
4798 else
4799 host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
4800
2da1d7f2
YC
4801 /*
4802 * Enable interrupts for command done, data over, data empty, card det,
4803 * receive ready and error such as transmit, receive timeout, crc error
4804 */
4805 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3c2a0909
S
4806 if (host->pdata->cd_type == DW_MCI_CD_INTERNAL)
4807 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE |
4808 SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_RXDR |
2da1d7f2 4809 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
3c2a0909
S
4810 else
4811 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE |
4812 SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR | SDMMC_INT_RXDR |
4813 DW_MCI_ERROR_FLAGS);
2da1d7f2
YC
4814 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
4815
3c2a0909
S
4816 /* disable queue ready interrupt */
4817 qrdy_int = mci_readl(host, SHA_CMD_IE);
4818 qrdy_int &= ~QRDY_INT_EN;
4819 mci_writel(host, SHA_CMD_IE, qrdy_int);
4820
2da1d7f2
YC
4821 dev_info(host->dev, "DW MMC controller at irq %d, "
4822 "%d bit host data width, "
4823 "%u deep fifo\n",
4824 host->irq, width, fifo_size);
4825
f95f3850
WN
4826 /* We need at least one slot to succeed */
4827 for (i = 0; i < host->num_slots; i++) {
4828 ret = dw_mci_init_slot(host, i);
1c2215b7
TA
4829 if (ret)
4830 dev_dbg(host->dev, "slot %d init failed\n", i);
4831 else
4832 init_slots++;
4833 }
4834
3c2a0909
S
4835 dw_mci_debug_init(host);
4836
1c2215b7
TA
4837 if (init_slots) {
4838 dev_info(host->dev, "%d slots initialized\n", init_slots);
4839 } else {
4840 dev_dbg(host->dev, "attempted to initialize %d slots, "
4841 "but failed on all\n", host->num_slots);
780f22af 4842 goto err_workqueue;
f95f3850 4843 }
3c2a0909
S
4844 if (host->pdata->cd_type == DW_MCI_CD_GPIO) {
4845 if (drv_data && drv_data->misc_control)
4846 drv_data->misc_control(host, CTRL_REQUEST_EXT_IRQ,
4847 dw_mci_detect_interrupt);
4848 }
f95f3850 4849
f95f3850 4850 if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
4a90920c 4851 dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n");
f95f3850 4852
3c2a0909
S
4853 if (drv_data && drv_data->register_notifier)
4854 dw_mci_register_notifier(host, REGISTER_NOTI);
4855
4856
f95f3850
WN
4857 return 0;
4858
1791b13e 4859err_workqueue:
95dcc2cb 4860 destroy_workqueue(host->card_workqueue);
3c2a0909
S
4861 pm_qos_remove_request(&host->pm_qos_int);
4862 if (host->pdata->tp_mon_tbl) {
4863 pm_qos_remove_request(&host->pm_qos_mif);
4864#if defined(CONFIG_ARM_EXYNOS_MP_CPUFREQ)
4865 pm_qos_remove_request(&host->pm_qos_cluster0);
4866#elif defined(CONFIG_ARM_EXYNOS_SMP_CPUFREQ)
4867 pm_qos_remove_request(&host->pm_qos_cluster0);
4868#else
4869 pm_qos_remove_request(&host->pm_qos_cluster1);
4870#endif
4871 }
1791b13e 4872
f95f3850
WN
4873err_dmaunmap:
4874 if (host->use_dma && host->dma_ops->exit)
4875 host->dma_ops->exit(host);
f95f3850 4876
780f22af 4877 if (host->vmmc)
c07946a3 4878 regulator_disable(host->vmmc);
f90a0612
TA
4879
4880err_clk_ciu:
3c2a0909
S
4881 if (!IS_ERR(host->ciu_clk) || !IS_ERR(host->gate_clk))
4882 dw_mci_ciu_clk_dis(host);
f90a0612 4883err_clk_biu:
780f22af 4884 if (!IS_ERR(host->biu_clk))
3c2a0909 4885 dw_mci_biu_clk_dis(host);
780f22af 4886
f95f3850
WN
4887 return ret;
4888}
62ca8034 4889EXPORT_SYMBOL(dw_mci_probe);
f95f3850 4890
62ca8034 4891void dw_mci_remove(struct dw_mci *host)
f95f3850 4892{
3c2a0909 4893 const struct dw_mci_drv_data *drv_data = host->drv_data;
f95f3850
WN
4894 int i;
4895
4896 mci_writel(host, RINTSTS, 0xFFFFFFFF);
4897 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
4898
3c2a0909
S
4899#if defined(CONFIG_BCM43455) || defined(CONFIG_BCM43455_MODULE)|| \
4900 defined(CONFIG_BCM4343) || defined(CONFIG_BCM4343_MODULE) || \
4901 defined(CONFIG_BCM43454) || defined(CONFIG_BCM43454_MODULE)
4902 if (host->pdata->cd_type == DW_MCI_CD_EXTERNAL)
4903 host->pdata->ext_cd_cleanup(&dw_mci_notify_change, (void *)host);
4904#else
4905 if (host->pdata->cd_type == DW_MCI_CD_EXTERNAL)
4906 host->pdata->ext_cd_cleanup(&dw_mci_notify_change);
4907#endif /* CONFIG_BCM43455 || CONFIG_BCM43455_MODULE */
4908
f95f3850 4909 for (i = 0; i < host->num_slots; i++) {
4a90920c 4910 dev_dbg(host->dev, "remove slot %d\n", i);
f95f3850
WN
4911 if (host->slot[i])
4912 dw_mci_cleanup_slot(host->slot[i], i);
4913 }
4914
4915 /* disable clock to CIU */
4916 mci_writel(host, CLKENA, 0);
4917 mci_writel(host, CLKSRC, 0);
4918
3c2a0909
S
4919 del_timer_sync(&host->timer);
4920 del_timer_sync(&host->dto_timer);
95dcc2cb 4921 destroy_workqueue(host->card_workqueue);
3c2a0909
S
4922 if (host->pdata->tp_mon_tbl) {
4923 cancel_delayed_work_sync(&host->tp_mon);
4924 pm_qos_remove_request(&host->pm_qos_mif);
4925#if defined(CONFIG_ARM_EXYNOS_MP_CPUFREQ)
4926 pm_qos_remove_request(&host->pm_qos_cluster0);
4927#elif defined(CONFIG_ARM_EXYNOS_SMP_CPUFREQ)
4928 pm_qos_remove_request(&host->pm_qos_cluster0);
4929#else
4930 pm_qos_remove_request(&host->pm_qos_cluster1);
4931#endif
4932 }
4933
4934 if (drv_data && drv_data->register_notifier)
4935 dw_mci_register_notifier(host, UNREGISTER_NOTI);
4936
4937 pm_qos_remove_request(&host->pm_qos_int);
f95f3850
WN
4938
4939 if (host->use_dma && host->dma_ops->exit)
4940 host->dma_ops->exit(host);
4941
780f22af 4942 if (host->vmmc)
c07946a3 4943 regulator_disable(host->vmmc);
c07946a3 4944
3c2a0909
S
4945 if (!IS_ERR(host->gate_clk))
4946 dw_mci_ciu_clk_dis(host);
f95f3850 4947}
62ca8034
SH
4948EXPORT_SYMBOL(dw_mci_remove);
4949
4950
f95f3850 4951
6fe8890d 4952#ifdef CONFIG_PM_SLEEP
f95f3850
WN
4953/*
4954 * TODO: we should probably disable the clock to the card in the suspend path.
4955 */
62ca8034 4956int dw_mci_suspend(struct dw_mci *host)
f95f3850 4957{
62ca8034 4958 int i, ret = 0;
3c2a0909
S
4959 u32 clkena;
4960 const struct dw_mci_drv_data *drv_data = host->drv_data;
f95f3850
WN
4961
4962 for (i = 0; i < host->num_slots; i++) {
4963 struct dw_mci_slot *slot = host->slot[i];
4964 if (!slot)
4965 continue;
3c2a0909
S
4966 if (slot->mmc) {
4967 if (host->pdata->use_gate_clock)
4968 atomic_inc_return(&host->ciu_en_win);
4969 dw_mci_ciu_clk_en(host, false);
4970 clkena = mci_readl(host, CLKENA);
4971 clkena &= ~((SDMMC_CLKEN_LOW_PWR) << slot->id);
4972 mci_writel(host, CLKENA, clkena);
4973 dw_mci_update_clock(slot);
4974 if (host->pdata->use_gate_clock)
4975 atomic_dec_return(&host->ciu_en_win);
4976
4977 slot->mmc->pm_flags |= slot->mmc->pm_caps;
4978 ret = mmc_suspend_host(slot->mmc);
4979 if (ret < 0) {
4980 while (--i >= 0) {
4981 slot = host->slot[i];
4982 if (slot)
4983 mmc_resume_host(host->slot[i]->mmc);
4984 }
4985 return ret;
f95f3850 4986 }
f95f3850
WN
4987 }
4988 }
4989
3c2a0909
S
4990 if (host->pdata->tp_mon_tbl &&
4991 (host->pdata->pm_caps & MMC_PM_KEEP_POWER)) {
4992 cancel_delayed_work_sync(&host->tp_mon);
4993 pm_qos_update_request(&host->pm_qos_mif, 0);
4994#if defined(CONFIG_ARM_EXYNOS_MP_CPUFREQ)
4995 pm_qos_update_request(&host->pm_qos_cluster0, 0);
4996#elif defined(CONFIG_ARM_EXYNOS_SMP_CPUFREQ)
4997 pm_qos_update_request(&host->pm_qos_cluster0, 0);
4998#else
4999 pm_qos_update_request(&host->pm_qos_cluster1, 0);
5000#endif
5001 host->transferred_cnt = 0;
5002 host->cmd_cnt = 0;
5003 }
5004
5005 if (host->pdata->enable_cclk_on_suspend) {
5006 host->pdata->on_suspend = true;
5007 dw_mci_ciu_clk_en(host, false);
5008 dw_mci_biu_clk_en(host, false);
5009 }
5010
5011 if (drv_data && drv_data->misc_control) {
5012 if (drv_data->misc_control(host, CTRL_CHECK_CD, NULL) == 1)
5013 mdelay(40);
5014 }
c07946a3 5015
f95f3850
WN
5016 return 0;
5017}
62ca8034 5018EXPORT_SYMBOL(dw_mci_suspend);
f95f3850 5019
62ca8034 5020int dw_mci_resume(struct dw_mci *host)
f95f3850 5021{
3c2a0909
S
5022 const struct dw_mci_drv_data *drv_data = host->drv_data;
5023
f95f3850 5024 int i, ret;
3c2a0909
S
5025#if defined(CONFIG_MMC_DW_FMP_DM_CRYPT)
5026 int id;
5027#endif
f95f3850 5028
3c2a0909
S
5029 if (host->pdata->enable_cclk_on_suspend)
5030 host->pdata->on_suspend = false;
5031
5032 host->current_speed = 0;
5033
5034 if (host->pdata->use_biu_gate_clock)
5035 atomic_inc_return(&host->biu_en_win);
5036 dw_mci_biu_clk_en(host, false);
5037
5038 mci_writel(host, DDR200_ENABLE_SHIFT, 0x0);
5039
5040 if (host->pdata->use_gate_clock)
5041 atomic_inc_return(&host->ciu_en_win);
5042 dw_mci_ciu_clk_en(host, false);
1d6c4e0a 5043
4a90920c 5044 if (!mci_wait_reset(host->dev, host)) {
3c2a0909
S
5045 dw_mci_ciu_clk_dis(host);
5046 if (host->pdata->use_gate_clock)
5047 atomic_dec_return(&host->ciu_en_win);
5048 if (host->pdata->use_biu_gate_clock)
5049 atomic_dec_return(&host->biu_en_win);
e61cf118
JC
5050 ret = -ENODEV;
5051 return ret;
5052 }
3c2a0909
S
5053 if (host->pdata->use_gate_clock)
5054 atomic_dec_return(&host->ciu_en_win);
e61cf118 5055
3bfe619d 5056 if (host->use_dma && host->dma_ops->init)
141a712a
SJ
5057 host->dma_ops->init(host);
5058
3c2a0909
S
5059 if (drv_data && drv_data->cfg_smu)
5060 drv_data->cfg_smu(host);
5061
5062#if defined(CONFIG_MMC_DW_FMP_DM_CRYPT)
5063 id = of_alias_get_id(host->dev->of_node, "mshc");
5064 if (!id) {
5065 ret = exynos_smc(SMC_CMD_FMP, FMP_KEY_RESUME, EMMC0_FMP, 0);
5066 if (ret)
5067 dev_err(host->dev, "failed to smc call for FMP: %x\n", ret);
5068 }
5069#endif
5070
e61cf118
JC
5071 /* Restore the old value at FIFOTH register */
5072 mci_writel(host, FIFOTH, host->fifoth_val);
5073
5074 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3c2a0909
S
5075 if (host->pdata->cd_type == DW_MCI_CD_INTERNAL)
5076 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE |
5077 SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR |
e61cf118 5078 DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
3c2a0909
S
5079 else
5080 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE |
5081 SDMMC_INT_DATA_OVER | SDMMC_INT_TXDR |
5082 DW_MCI_ERROR_FLAGS);
e61cf118
JC
5083 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
5084
3c2a0909
S
5085 /* For unuse clock gating */
5086 dw_mci_ciu_clk_dis(host);
5087
5088 if (host->pdata->use_biu_gate_clock)
5089 atomic_dec_return(&host->biu_en_win);
5090
f95f3850
WN
5091 for (i = 0; i < host->num_slots; i++) {
5092 struct dw_mci_slot *slot = host->slot[i];
3c2a0909 5093 struct mmc_ios ios;
f95f3850
WN
5094 if (!slot)
5095 continue;
3c2a0909
S
5096 if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER &&
5097 dw_mci_get_cd(slot->mmc)) {
5098 memcpy(&ios, &slot->mmc->ios,
5099 sizeof(struct mmc_ios));
5100 ios.timing = MMC_TIMING_LEGACY;
5101 dw_mci_set_ios(slot->mmc, &ios);
ab269128 5102 dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
3c2a0909
S
5103 if (host->pdata->use_gate_clock)
5104 atomic_inc_return(&host->ciu_en_win);
5105 dw_mci_ciu_clk_en(host, false);
ab269128 5106 dw_mci_setup_bus(slot, true);
3c2a0909
S
5107 if (host->pdata->use_gate_clock)
5108 atomic_dec_return(&host->ciu_en_win);
5109 if (drv_data && drv_data->misc_control)
5110 drv_data->misc_control(host,
5111 CTRL_RESTORE_CLKSEL, NULL);
5112 mci_writel(host, CDTHRCTL,
5113 host->cd_rd_thr << 16 | 1);
ab269128
AK
5114 }
5115
3c2a0909
S
5116 if (dw_mci_get_cd(slot->mmc))
5117 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
5118 else
5119 clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
5120
f95f3850
WN
5121 ret = mmc_resume_host(host->slot[i]->mmc);
5122 if (ret < 0)
5123 return ret;
5124 }
3c2a0909
S
5125
5126 if (host->pdata->tp_mon_tbl &&
5127 (host->pdata->pm_caps & MMC_PM_KEEP_POWER)) {
5128 host->transferred_cnt = 0;
5129 host->cmd_cnt = 0;
5130 schedule_delayed_work(&host->tp_mon, HZ);
5131 }
5132
f95f3850
WN
5133 return 0;
5134}
62ca8034 5135EXPORT_SYMBOL(dw_mci_resume);
3c2a0909
S
5136
5137int dw_mci_early_resume(struct dw_mci *host)
5138{
5139 int i;
5140
5141 for (i = 0; i < host->num_slots; i++) {
5142 struct dw_mci_slot *slot = host->slot[i];
5143 if (!slot)
5144 continue;
5145
5146 if (slot->mmc && dw_mci_get_cd(slot->mmc) &&
5147 host->pdata->ext_setpower)
5148 host->pdata->ext_setpower(host,
5149 DW_MMC_EXT_VMMC_ON | DW_MMC_EXT_VQMMC_ON);
5150 }
5151 return 0;
5152}
5153EXPORT_SYMBOL(dw_mci_early_resume);
6fe8890d
JC
5154#endif /* CONFIG_PM_SLEEP */
5155
3c2a0909
S
5156void dw_mci_shutdown(struct dw_mci *host)
5157{
5158 int i;
5159
5160 for (i = 0; i < host->num_slots; i++) {
5161 struct dw_mci_slot *slot = host->slot[i];
5162 if (!slot)
5163 continue;
5164
5165#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
5166 if (mmc_bus_needs_resume(slot->mmc)) {
5167 mmc_resume_bus(slot->mmc);
5168
5169 mdelay(10);
5170 }
5171#endif
5172
5173 if (slot->mmc->card && mmc_card_mmc(slot->mmc->card) &&
5174 slot->mmc->card->ext_csd.power_off_notification == EXT_CSD_POWER_ON) {
5175 mmc_claim_host(slot->mmc);
5176 mmc_poweroff_notify(slot->mmc->card, EXT_CSD_POWER_OFF_SHORT);
5177 mmc_release_host(slot->mmc);
5178 }
5179 }
5180
5181 if (host->pdata->ext_setpower) {
5182 host->pdata->ext_setpower(host, 0);
5183 mdelay(5);
5184 }
5185}
5186EXPORT_SYMBOL(dw_mci_shutdown);
5187
f95f3850
WN
5188static int __init dw_mci_init(void)
5189{
8e1c4e4d 5190 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
62ca8034 5191 return 0;
f95f3850
WN
5192}
5193
5194static void __exit dw_mci_exit(void)
5195{
f95f3850
WN
5196}
5197
5198module_init(dw_mci_init);
5199module_exit(dw_mci_exit);
5200
5201MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
5202MODULE_AUTHOR("NXP Semiconductor VietNam");
5203MODULE_AUTHOR("Imagination Technologies Ltd");
5204MODULE_LICENSE("GPL v2");