/*
 *  linux/drivers/mmc/host/at91_mci.c - ATMEL AT91 MCI Driver
 *
 *  Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
 *
 *  Copyright (C) 2006 Malcolm Noyes
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
   This is the AT91 MCI driver that has been tested with both MMC cards
   and SD-cards.  Boards that support write protect are now supported.
   The CCAT91SBC001 board does not support SD cards.

   The three entry points are at91_mci_request, at91_mci_set_ios
   and at91_mci_get_ro.

   SET IOS
     This configures the device to put it into the correct mode and clock speed
     required.

   MCI REQUEST
     MCI request processes the commands sent in the mmc_request structure. This
     can consist of a processing command and a stop command in the case of
     multiple block transfers.

     There are three main types of request: commands, reads and writes.

     Commands are straightforward. The command is submitted to the controller and
     the request function returns. When the controller generates an interrupt to
     indicate the command is finished, the response to the command is read and the
     mmc_request_done function is called to end the request.

     Reads and writes work in a similar manner to normal commands but involve the
     PDC (DMA) controller to manage the transfers.

     A read is done from the controller directly to the scatterlist passed in from
     the request.  Due to a bug in the AT91RM9200 controller, when a read is
     completed, all the words are byte swapped in the scatterlist buffers.  The
     AT91SAM926x controllers are not affected by this bug.

     The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY

     A write is slightly different in that the bytes to write are read from the
     scatterlist into a DMA memory buffer (this is in case the source buffer
     should be read only).  The entire write is then done from this single DMA
     memory buffer.

     The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY

   GET RO
     Gets the status of the write protect pin, if available.
*/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/atmel_pdc.h>

#include <linux/mmc/host.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/gpio.h>

#include <asm/mach/mmc.h>
#include <asm/arch/board.h>
#include <asm/arch/cpu.h>
#include <asm/arch/at91_mci.h>

#define DRIVER_NAME "at91_mci"

#define FL_SENT_COMMAND	(1 << 0)
#define FL_SENT_STOP	(1 << 1)

#define AT91_MCI_ERRORS	(AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE	\
		| AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE		\
		| AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)

#define at91_mci_read(host, reg)	__raw_readl((host)->baseaddr + (reg))
#define at91_mci_write(host, reg, val)	__raw_writel((val), (host)->baseaddr + (reg))


/*
 * Low level type for this driver
 */
struct at91mci_host
{
	struct mmc_host *mmc;
	struct mmc_command *cmd;
	struct mmc_request *request;

	void __iomem *baseaddr;
	int irq;

	struct at91_mmc_data *board;
	int present;

	struct clk *mci_clk;

	/*
	 * Flag indicating when the command has been sent. This is used to
	 * work out whether or not to send the stop
	 */
	unsigned int flags;
	/* flag for current bus settings */
	u32 bus_mode;

	/* DMA buffer used for transmitting */
	unsigned int* buffer;
	dma_addr_t physical_address;
	unsigned int total_length;

	/* Latest in the scatterlist that has been enabled for transfer, but not freed */
	int in_use_index;

	/* Latest in the scatterlist that has been enabled for transfer */
	int transfer_index;

	/* Timer for timeouts */
	struct timer_list timer;
};

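/*
 * Watchdog for requests that never complete: armed for one second in
 * at91_mci_request() and deleted in at91_mci_process_next() once the
 * request has finished.  If it fires while a request is still pending,
 * fail that request with -ETIMEDOUT so the MMC core can recover.
 */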
static void at91_timeout_timer(unsigned long data)
{
	struct at91mci_host *host;

	host = (struct at91mci_host *)data;

	if (host->request) {
		dev_err(host->mmc->parent, "Timeout waiting end of packet\n");

		if (host->cmd && host->cmd->data) {
			host->cmd->data->error = -ETIMEDOUT;
		} else {
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->request->cmd->error = -ETIMEDOUT;
		}

		mmc_request_done(host->mmc, host->request);
	}
}

/*
 * Copy from sg to a dma block - used for transfers
 */
static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
{
	unsigned int len, i, size;
	unsigned *dmabuf = host->buffer;

	size = host->total_length;
	len = data->sg_len;

	/*
	 * Just loop through all entries. Size might not
	 * be the entire list though so make sure that
	 * we do not transfer too much.
	 */
	for (i = 0; i < len; i++) {
		struct scatterlist *sg;
		int amount;
		unsigned int *sgbuffer;

		sg = &data->sg[i];

		sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
		amount = min(size, sg->length);
		size -= amount;

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			int index;

			for (index = 0; index < (amount / 4); index++)
				*dmabuf++ = swab32(sgbuffer[index]);
		} else {
			char *tmpv = (char *)dmabuf;

			/* advance dmabuf past the copied bytes so the next
			 * scatterlist entry does not overwrite this one */
			memcpy(tmpv, sgbuffer, amount);
			tmpv += amount;
			dmabuf = (unsigned *)tmpv;
		}

		kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);

		if (size == 0)
			break;
	}

	/*
	 * Check that we didn't get a request to transfer
	 * more data than can fit into the SG list.
	 */
	BUG_ON(size != 0);
}

/*
 * Prepare a dma read
 */
static void at91_mci_pre_dma_read(struct at91mci_host *host)
{
	int i;
	struct scatterlist *sg;
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("pre dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

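	/*
	 * The PDC provides a "current" and a "next" transfer register pair,
	 * so queue up to two scatterlist entries at a time.
	 */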
	for (i = 0; i < 2; i++) {
		/* nothing left to transfer */
		if (host->transfer_index >= data->sg_len) {
			pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
			break;
		}

		/* Check to see if this needs filling */
		if (i == 0) {
			if (at91_mci_read(host, ATMEL_PDC_RCR) != 0) {
				pr_debug("Transfer active in current\n");
				continue;
			}
		}
		else {
			if (at91_mci_read(host, ATMEL_PDC_RNCR) != 0) {
				pr_debug("Transfer active in next\n");
				continue;
			}
		}

		/* Setup the next transfer */
		pr_debug("Using transfer index %d\n", host->transfer_index);

		sg = &data->sg[host->transfer_index++];
		pr_debug("sg = %p\n", sg);

		sg->dma_address = dma_map_page(NULL, sg_page(sg), sg->offset, sg->length, DMA_FROM_DEVICE);

		pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);

		if (i == 0) {
			at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address);
			at91_mci_write(host, ATMEL_PDC_RCR, (data->blksz & 0x3) ? sg->length : sg->length / 4);
		}
		else {
			at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address);
			at91_mci_write(host, ATMEL_PDC_RNCR, (data->blksz & 0x3) ? sg->length : sg->length / 4);
		}
	}

	pr_debug("pre dma read done\n");
}

/*
 * Handle after a dma read
 */
static void at91_mci_post_dma_read(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("post dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	while (host->in_use_index < host->transfer_index) {
		struct scatterlist *sg;

		pr_debug("finishing index %d\n", host->in_use_index);

		sg = &data->sg[host->in_use_index++];

		pr_debug("Unmapping page %08X\n", sg->dma_address);

		dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);

		data->bytes_xfered += sg->length;

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			unsigned int *buffer;
			int index;

			/* Swap the contents of the buffer */
			buffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
			pr_debug("buffer = %p, length = %d\n", buffer, sg->length);

			for (index = 0; index < (sg->length / 4); index++)
				buffer[index] = swab32(buffer[index]);

			kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
		}

		flush_dcache_page(sg_page(sg));
	}

	/* Is there another transfer to trigger? */
	if (host->transfer_index < data->sg_len)
		at91_mci_pre_dma_read(host);
	else {
		at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_ENDRX);
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
	}

	pr_debug("post dma read done\n");
}

/*
 * Handle transmitted data
 */
static void at91_mci_handle_transmitted(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("Handling the transmit\n");

	/* Disable the transfer */
	at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);

	/* Now wait for cmd ready */
	at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE);

	cmd = host->cmd;
	if (!cmd) return;

	data = cmd->data;
	if (!data) return;

	if (cmd->data->blocks > 1) {
		pr_debug("multiple write : wait for BLKE...\n");
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
	} else
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);

	data->bytes_xfered = host->total_length;
}

/* Handle after command sent ready */
static int at91_mci_handle_cmdrdy(struct at91mci_host *host)
{
	if (!host->cmd)
		return 1;
	else if (!host->cmd->data) {
		if (host->flags & FL_SENT_STOP) {
			/* After a multi-block write, we must wait for NOTBUSY */
			at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
		} else
			return 1;
	} else if (host->cmd->data->flags & MMC_DATA_WRITE) {
		/* After sending the multi-block-write command, start the DMA transfer */
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE);
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
	}

	/* command not completed, have to wait */
	return 0;
}


/*
 * Enable the controller
 */
static void at91_mci_enable(struct at91mci_host *host)
{
	unsigned int mr;

	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
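	/* generous data timeout: 1M-cycle multiplier with the DTOCYC field fully set */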
	at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
	mr = AT91_MCI_PDCMODE | 0x34a;

	if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
		mr |= AT91_MCI_RDPROOF | AT91_MCI_WRPROOF;

	at91_mci_write(host, AT91_MCI_MR, mr);

	/* use Slot A or B (only one at a time) */
	at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b);
}

/*
 * Disable the controller
 */
static void at91_mci_disable(struct at91mci_host *host)
{
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
}

/*
 * Send a command
 */
static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
{
	unsigned int cmdr, mr;
	unsigned int block_length;
	struct mmc_data *data = cmd->data;

	unsigned int blocks;
	unsigned int ier = 0;

	host->cmd = cmd;

	/* Needed for leaving busy state before CMD1 */
	if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
		pr_debug("Clearing timeout\n");
		at91_mci_write(host, AT91_MCI_ARGR, 0);
		at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
		while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
			/* spin */
			pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
		}
	}

	cmdr = cmd->opcode;

	if (mmc_resp_type(cmd) == MMC_RSP_NONE)
		cmdr |= AT91_MCI_RSPTYP_NONE;
	else {
		/* if a response is expected then allow maximum response latency */
		cmdr |= AT91_MCI_MAXLAT;
		/* set 136 bit response for R2, 48 bit response otherwise */
		if (mmc_resp_type(cmd) == MMC_RSP_R2)
			cmdr |= AT91_MCI_RSPTYP_136;
		else
			cmdr |= AT91_MCI_RSPTYP_48;
	}

	if (data) {

		if (cpu_is_at91rm9200() && (data->blksz & 0x3)) {
			pr_debug("Unsupported block size\n");
			cmd->error = -EINVAL;
			mmc_request_done(host->mmc, host->request);
			return;
		}

		block_length = data->blksz;
		blocks = data->blocks;

		/* always set data start - also set direction flag for read */
		if (data->flags & MMC_DATA_READ)
			cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
		else if (data->flags & MMC_DATA_WRITE)
			cmdr |= AT91_MCI_TRCMD_START;

		if (data->flags & MMC_DATA_STREAM)
			cmdr |= AT91_MCI_TRTYP_STREAM;
		if (data->blocks > 1)
			cmdr |= AT91_MCI_TRTYP_MULTIPLE;
	}
	else {
		block_length = 0;
		blocks = 0;
	}

	if (host->flags & FL_SENT_STOP)
		cmdr |= AT91_MCI_TRCMD_STOP;

	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdr |= AT91_MCI_OPDCMD;

	/*
	 * Set the arguments and send the command
	 */
	pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n",
		cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));

	if (!data) {
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS | ATMEL_PDC_RXTDIS);
		at91_mci_write(host, ATMEL_PDC_RPR, 0);
		at91_mci_write(host, ATMEL_PDC_RCR, 0);
		at91_mci_write(host, ATMEL_PDC_RNPR, 0);
		at91_mci_write(host, ATMEL_PDC_RNCR, 0);
		at91_mci_write(host, ATMEL_PDC_TPR, 0);
		at91_mci_write(host, ATMEL_PDC_TCR, 0);
		at91_mci_write(host, ATMEL_PDC_TNPR, 0);
		at91_mci_write(host, ATMEL_PDC_TNCR, 0);
		ier = AT91_MCI_CMDRDY;
	} else {
		/* zero block length and PDC mode */
		mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff;
		mr |= (data->blksz & 0x3) ? AT91_MCI_PDCFBYTE : 0;
		mr |= (block_length << 16);
		mr |= AT91_MCI_PDCMODE;
		at91_mci_write(host, AT91_MCI_MR, mr);

		/*
		 * Disable the PDC controller
		 */
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);

		if (cmdr & AT91_MCI_TRCMD_START) {
			data->bytes_xfered = 0;
			host->transfer_index = 0;
			host->in_use_index = 0;
			if (cmdr & AT91_MCI_TRDIR) {
				/*
				 * Handle a read
				 */
				host->buffer = NULL;
				host->total_length = 0;

				at91_mci_pre_dma_read(host);
				ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
			}
			else {
				/*
				 * Handle a write
				 */
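				/*
				 * Bounce the scatterlist into one coherent DMA
				 * buffer; it is freed again in
				 * at91_mci_completed_command().
				 */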
				host->total_length = block_length * blocks;
				host->buffer = dma_alloc_coherent(NULL,
						host->total_length,
						&host->physical_address, GFP_KERNEL);

				at91_mci_sg_to_dma(host, data);

				pr_debug("Transmitting %d bytes\n", host->total_length);

				at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
				at91_mci_write(host, ATMEL_PDC_TCR, (data->blksz & 0x3) ?
						host->total_length : host->total_length / 4);

				ier = AT91_MCI_CMDRDY;
			}
		}
	}

	/*
	 * Send the command and then enable the PDC - not the other way round as
	 * the data sheet says
	 */

	at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
	at91_mci_write(host, AT91_MCI_CMDR, cmdr);

	if (cmdr & AT91_MCI_TRCMD_START) {
		if (cmdr & AT91_MCI_TRDIR)
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
	}

	/* Enable selected interrupts */
	at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier);
}

/*
 * Process the next step in the request
 */
static void at91_mci_process_next(struct at91mci_host *host)
{
	if (!(host->flags & FL_SENT_COMMAND)) {
		host->flags |= FL_SENT_COMMAND;
		at91_mci_send_command(host, host->request->cmd);
	}
	else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
		host->flags |= FL_SENT_STOP;
		at91_mci_send_command(host, host->request->stop);
	} else {
		del_timer(&host->timer);
		mmc_request_done(host->mmc, host->request);
	}
}

/*
 * Handle a command that has been completed
 */
static void at91_mci_completed_command(struct at91mci_host *host)
{
	struct mmc_command *cmd = host->cmd;
	unsigned int status;

	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);

	cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
	cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
	cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
	cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));

	if (host->buffer) {
		dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address);
		host->buffer = NULL;
	}

	status = at91_mci_read(host, AT91_MCI_SR);

	pr_debug("Status = %08X [%08X %08X %08X %08X]\n",
		 status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);

	if (status & AT91_MCI_ERRORS) {
		if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) {
			cmd->error = 0;
		}
		else {
			if (status & (AT91_MCI_RTOE | AT91_MCI_DTOE))
				cmd->error = -ETIMEDOUT;
			else if (status & (AT91_MCI_RCRCE | AT91_MCI_DCRCE))
				cmd->error = -EILSEQ;
			else
				cmd->error = -EIO;

			pr_debug("Error detected and set to %d (cmd = %d, retries = %d)\n",
				 cmd->error, cmd->opcode, cmd->retries);
		}
	}
	else
		cmd->error = 0;

	at91_mci_process_next(host);
}

/*
 * Handle an MMC request
 */
static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct at91mci_host *host = mmc_priv(mmc);
	host->request = mrq;
	host->flags = 0;

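	/* arm the one-second watchdog so a stalled request is eventually failed */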
	mod_timer(&host->timer, jiffies + HZ);

	at91_mci_process_next(host);
}

/*
 * Set the IOS
 */
static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int clkdiv;
	struct at91mci_host *host = mmc_priv(mmc);
	unsigned long at91_master_clock = clk_get_rate(host->mci_clk);

	host->bus_mode = ios->bus_mode;

	if (ios->clock == 0) {
		/* Disable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS);
		clkdiv = 0;
	}
	else {
		/* Enable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);

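		/*
		 * MCI clock = master clock / (2 * (CLKDIV + 1)); pick CLKDIV
		 * so the resulting card clock does not exceed ios->clock.
		 */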
		if ((at91_master_clock % (ios->clock * 2)) == 0)
			clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
		else
			clkdiv = (at91_master_clock / ios->clock) / 2;

		pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
			at91_master_clock / (2 * (clkdiv + 1)));
	}
	if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
		pr_debug("MMC: Setting controller bus width to 4\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
	}
	else {
		pr_debug("MMC: Setting controller bus width to 1\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
	}

	/* Set the clock divider */
	at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);

	/* maybe switch power to the card */
	if (host->board->vcc_pin) {
		switch (ios->power_mode) {
		case MMC_POWER_OFF:
			gpio_set_value(host->board->vcc_pin, 0);
			break;
		case MMC_POWER_UP:
			gpio_set_value(host->board->vcc_pin, 1);
			break;
		case MMC_POWER_ON:
			break;
		default:
			WARN_ON(1);
		}
	}
}

/*
 * Handle an interrupt
 */
static irqreturn_t at91_mci_irq(int irq, void *devid)
{
	struct at91mci_host *host = devid;
	int completed = 0;
	unsigned int int_status, int_mask;

	int_status = at91_mci_read(host, AT91_MCI_SR);
	int_mask = at91_mci_read(host, AT91_MCI_IMR);

	pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask,
		int_status & int_mask);

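	/* only act on interrupt sources that are currently enabled */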
	int_status = int_status & int_mask;

	if (int_status & AT91_MCI_ERRORS) {
		completed = 1;

		if (int_status & AT91_MCI_UNRE)
			pr_debug("MMC: Underrun error\n");
		if (int_status & AT91_MCI_OVRE)
			pr_debug("MMC: Overrun error\n");
		if (int_status & AT91_MCI_DTOE)
			pr_debug("MMC: Data timeout\n");
		if (int_status & AT91_MCI_DCRCE)
			pr_debug("MMC: CRC error in data\n");
		if (int_status & AT91_MCI_RTOE)
			pr_debug("MMC: Response timeout\n");
		if (int_status & AT91_MCI_RENDE)
			pr_debug("MMC: Response end bit error\n");
		if (int_status & AT91_MCI_RCRCE)
			pr_debug("MMC: Response CRC error\n");
		if (int_status & AT91_MCI_RDIRE)
			pr_debug("MMC: Response direction error\n");
		if (int_status & AT91_MCI_RINDE)
			pr_debug("MMC: Response index error\n");
	} else {
		/* Only continue processing if no errors */

		if (int_status & AT91_MCI_TXBUFE) {
			pr_debug("TX buffer empty\n");
			at91_mci_handle_transmitted(host);
		}

		if (int_status & AT91_MCI_ENDRX) {
			pr_debug("ENDRX\n");
			at91_mci_post_dma_read(host);
		}

		if (int_status & AT91_MCI_RXBUFF) {
			pr_debug("RX buffer full\n");
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
			at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_RXBUFF | AT91_MCI_ENDRX);
			completed = 1;
		}

		if (int_status & AT91_MCI_ENDTX)
			pr_debug("Transmit has ended\n");

		if (int_status & AT91_MCI_NOTBUSY) {
			pr_debug("Card is ready\n");
			completed = 1;
		}

		if (int_status & AT91_MCI_DTIP)
			pr_debug("Data transfer in progress\n");

		if (int_status & AT91_MCI_BLKE) {
			pr_debug("Block transfer has ended\n");
			completed = 1;
		}

		if (int_status & AT91_MCI_TXRDY)
			pr_debug("Ready to transmit\n");

		if (int_status & AT91_MCI_RXRDY)
			pr_debug("Ready to receive\n");

		if (int_status & AT91_MCI_CMDRDY) {
			pr_debug("Command ready\n");
			completed = at91_mci_handle_cmdrdy(host);
		}
	}

	if (completed) {
		pr_debug("Completed command\n");
		at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
		at91_mci_completed_command(host);
	} else
		at91_mci_write(host, AT91_MCI_IDR, int_status);

	return IRQ_HANDLED;
}

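/*
 * Card insert/remove interrupt from the detect GPIO
 */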
static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
{
	struct at91mci_host *host = _host;
	int present = !gpio_get_value(irq_to_gpio(irq));

	/*
	 * we expect this irq on both insert and remove,
	 * and use a short delay to debounce.
	 */
	if (present != host->present) {
		host->present = present;
		pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
			present ? "insert" : "remove");
		if (!present) {
			pr_debug("****** Resetting SD-card bus width ******\n");
			at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
		}
		mmc_detect_change(host->mmc, msecs_to_jiffies(100));
	}
	return IRQ_HANDLED;
}

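/*
 * Read the state of the write-protect sense pin, if the board has one
 */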
static int at91_mci_get_ro(struct mmc_host *mmc)
{
	struct at91mci_host *host = mmc_priv(mmc);

	if (host->board->wp_pin)
		return !!gpio_get_value(host->board->wp_pin);
	/*
	 * Board doesn't support read only detection; let the mmc core
	 * decide what to do.
	 */
	return -ENOSYS;
}

static const struct mmc_host_ops at91_mci_ops = {
	.request	= at91_mci_request,
	.set_ios	= at91_mci_set_ios,
	.get_ro		= at91_mci_get_ro,
};

/*
 * Probe for the device
 */
static int __init at91_mci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct at91mci_host *host;
	struct resource *res;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		dev_dbg(&pdev->dev, "couldn't allocate mmc host\n");
		goto fail6;
	}

	mmc->ops = &at91_mci_ops;
	mmc->f_min = 375000;
	mmc->f_max = 25000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	mmc->max_blk_size = 4095;
	mmc->max_blk_count = mmc->max_req_size;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->buffer = NULL;
	host->bus_mode = 0;
	host->board = pdev->dev.platform_data;
	if (host->board->wire4) {
		if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
			mmc->caps |= MMC_CAP_4_BIT_DATA;
		else
			dev_warn(&pdev->dev, "4 wire bus mode not supported"
				" - using 1 wire\n");
	}

	/*
	 * Reserve GPIOs ... board init code makes sure these pins are set
	 * up as GPIOs with the right direction (input, except for vcc)
	 */
	if (host->board->det_pin) {
		ret = gpio_request(host->board->det_pin, "mmc_detect");
		if (ret < 0) {
			dev_dbg(&pdev->dev, "couldn't claim card detect pin\n");
			goto fail5;
		}
	}
	if (host->board->wp_pin) {
		ret = gpio_request(host->board->wp_pin, "mmc_wp");
		if (ret < 0) {
			dev_dbg(&pdev->dev, "couldn't claim wp sense pin\n");
			goto fail4;
		}
	}
	if (host->board->vcc_pin) {
		ret = gpio_request(host->board->vcc_pin, "mmc_vcc");
		if (ret < 0) {
			dev_dbg(&pdev->dev, "couldn't claim vcc switch pin\n");
			goto fail3;
		}
	}

	/*
	 * Get Clock
	 */
	host->mci_clk = clk_get(&pdev->dev, "mci_clk");
	if (IS_ERR(host->mci_clk)) {
		ret = -ENODEV;
		dev_dbg(&pdev->dev, "no mci_clk?\n");
		goto fail2;
	}

	/*
	 * Map I/O region
	 */
	host->baseaddr = ioremap(res->start, res->end - res->start + 1);
	if (!host->baseaddr) {
		ret = -ENOMEM;
		goto fail1;
	}

	/*
	 * Reset hardware
	 */
	clk_enable(host->mci_clk);		/* Enable the peripheral clock */
	at91_mci_disable(host);
	at91_mci_enable(host);

	/*
	 * Allocate the MCI interrupt
	 */
	host->irq = platform_get_irq(pdev, 0);
	ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED,
			mmc_hostname(mmc), host);
	if (ret) {
		dev_dbg(&pdev->dev, "request MCI interrupt failed\n");
		goto fail0;
	}

	platform_set_drvdata(pdev, mmc);

	/*
	 * Add host to MMC layer
	 */
	if (host->board->det_pin) {
		host->present = !gpio_get_value(host->board->det_pin);
	}
	else
		host->present = -1;

	mmc_add_host(mmc);

	setup_timer(&host->timer, at91_timeout_timer, (unsigned long)host);

	/*
	 * monitor card insertion/removal if we can
	 */
	if (host->board->det_pin) {
		ret = request_irq(gpio_to_irq(host->board->det_pin),
				at91_mmc_det_irq, 0, mmc_hostname(mmc), host);
		if (ret)
			dev_warn(&pdev->dev, "request MMC detect irq failed\n");
		else
			device_init_wakeup(&pdev->dev, 1);
	}

	pr_debug("Added MCI driver\n");

	return 0;

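/* error unwind: release everything acquired above, in reverse order */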
fail0:
	clk_disable(host->mci_clk);
	iounmap(host->baseaddr);
fail1:
	clk_put(host->mci_clk);
fail2:
	if (host->board->vcc_pin)
		gpio_free(host->board->vcc_pin);
fail3:
	if (host->board->wp_pin)
		gpio_free(host->board->wp_pin);
fail4:
	if (host->board->det_pin)
		gpio_free(host->board->det_pin);
fail5:
	mmc_free_host(mmc);
fail6:
	release_mem_region(res->start, res->end - res->start + 1);
	dev_err(&pdev->dev, "probe failed, err %d\n", ret);
	return ret;
}

/*
 * Remove a device
 */
static int __exit at91_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host;
	struct resource *res;

	if (!mmc)
		return -1;

	host = mmc_priv(mmc);

	if (host->board->det_pin) {
		if (device_can_wakeup(&pdev->dev))
			free_irq(gpio_to_irq(host->board->det_pin), host);
		device_init_wakeup(&pdev->dev, 0);
		gpio_free(host->board->det_pin);
	}

	at91_mci_disable(host);
	del_timer_sync(&host->timer);
	mmc_remove_host(mmc);
	free_irq(host->irq, host);

	clk_disable(host->mci_clk);		/* Disable the peripheral clock */
	clk_put(host->mci_clk);

	if (host->board->vcc_pin)
		gpio_free(host->board->vcc_pin);
	if (host->board->wp_pin)
		gpio_free(host->board->wp_pin);

	iounmap(host->baseaddr);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, res->end - res->start + 1);

	mmc_free_host(mmc);
	platform_set_drvdata(pdev, NULL);
	pr_debug("MCI Removed\n");

	return 0;
}

#ifdef CONFIG_PM
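/*
 * Keep the card-detect pin armed as a wakeup source across suspend
 * when the device has been flagged as wakeup capable.
 */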
1053static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
1054{
1055 struct mmc_host *mmc = platform_get_drvdata(pdev);
63b66438 1056 struct at91mci_host *host = mmc_priv(mmc);
65dbf343
AV
1057 int ret = 0;
1058
e0cda54e 1059 if (host->board->det_pin && device_may_wakeup(&pdev->dev))
63b66438
MP
1060 enable_irq_wake(host->board->det_pin);
1061
65dbf343
AV
1062 if (mmc)
1063 ret = mmc_suspend_host(mmc, state);
1064
1065 return ret;
1066}
1067
1068static int at91_mci_resume(struct platform_device *pdev)
1069{
1070 struct mmc_host *mmc = platform_get_drvdata(pdev);
63b66438 1071 struct at91mci_host *host = mmc_priv(mmc);
65dbf343
AV
1072 int ret = 0;
1073
e0cda54e 1074 if (host->board->det_pin && device_may_wakeup(&pdev->dev))
63b66438
MP
1075 disable_irq_wake(host->board->det_pin);
1076
65dbf343
AV
1077 if (mmc)
1078 ret = mmc_resume_host(mmc);
1079
1080 return ret;
1081}
1082#else
1083#define at91_mci_suspend NULL
1084#define at91_mci_resume NULL
1085#endif
1086
1087static struct platform_driver at91_mci_driver = {
a26b498c 1088 .remove = __exit_p(at91_mci_remove),
65dbf343
AV
1089 .suspend = at91_mci_suspend,
1090 .resume = at91_mci_resume,
1091 .driver = {
1092 .name = DRIVER_NAME,
1093 .owner = THIS_MODULE,
1094 },
1095};
1096
1097static int __init at91_mci_init(void)
1098{
a26b498c 1099 return platform_driver_probe(&at91_mci_driver, at91_mci_probe);
65dbf343
AV
1100}
1101
1102static void __exit at91_mci_exit(void)
1103{
1104 platform_driver_unregister(&at91_mci_driver);
1105}
1106
1107module_init(at91_mci_init);
1108module_exit(at91_mci_exit);
1109
1110MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
1111MODULE_AUTHOR("Nick Randell");
1112MODULE_LICENSE("GPL");
bc65c724 1113MODULE_ALIAS("platform:at91_mci");