1 /*
2 * linux/drivers/mmc/host/at91_mci.c - ATMEL AT91 MCI Driver
3 *
4 * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
5 *
6 * Copyright (C) 2006 Malcolm Noyes
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13 /*
14 This is the AT91 MCI driver that has been tested with both MMC cards
15 and SD-cards. Boards that support write protect are now supported.
16 The CCAT91SBC001 board does not support SD cards.
17
18   The main entry points are at91_mci_request, at91_mci_set_ios,
19   at91_mci_get_ro and at91_mci_enable_sdio_irq.
20
21 SET IOS
22 This configures the device to put it into the correct mode and clock speed
23 required.
24
25 MCI REQUEST
26 MCI request processes the commands sent in the mmc_request structure. This
27 can consist of a processing command and a stop command in the case of
28 multiple block transfers.
29
30   There are three main types of request: commands, reads and writes.
31
32   Commands are straightforward. The command is submitted to the controller and
33   the request function returns. When the controller generates an interrupt to indicate
34   that the command has finished, the response to the command is read and mmc_request_done()
35   is called to end the request.
36
37 Reads and writes work in a similar manner to normal commands but involve the PDC (DMA)
38 controller to manage the transfers.
39
40 A read is done from the controller directly to the scatterlist passed in from the request.
41 Due to a bug in the AT91RM9200 controller, when a read is completed, all the words are byte
42 swapped in the scatterlist buffers. AT91SAM926x are not affected by this bug.
43
44 The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY
45
46   A write is slightly different in that the bytes to write are first copied from the
47   scatterlist into a DMA memory buffer (in case the source buffer is read-only). The
48   entire write is then performed from this single DMA memory buffer.
49
50 The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY
51
52 GET RO
53 Gets the status of the write protect pin, if available.
54 */
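
/*
 * For reference: the board-specific fields this driver uses (slot_b, wire4,
 * det_pin, wp_pin, vcc_pin) arrive through platform data.  A minimal,
 * hypothetical board setup might look like the sketch below (the pin numbers
 * are placeholders; struct at91_mmc_data and the at91_add_device_mmc()
 * helper come from the AT91 board support code and <mach/board.h>):
 *
 *	static struct at91_mmc_data __initdata board_mmc_data = {
 *		.slot_b		= 0,
 *		.wire4		= 1,
 *		.det_pin	= AT91_PIN_PC9,
 *		.wp_pin		= AT91_PIN_PC4,
 *	};
 *
 *	at91_add_device_mmc(0, &board_mmc_data);
 */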
55
56 #include <linux/module.h>
57 #include <linux/moduleparam.h>
58 #include <linux/init.h>
59 #include <linux/ioport.h>
60 #include <linux/platform_device.h>
61 #include <linux/interrupt.h>
62 #include <linux/blkdev.h>
63 #include <linux/delay.h>
64 #include <linux/err.h>
65 #include <linux/dma-mapping.h>
66 #include <linux/clk.h>
67 #include <linux/atmel_pdc.h>
68
69 #include <linux/mmc/host.h>
70
71 #include <asm/io.h>
72 #include <asm/irq.h>
73 #include <asm/gpio.h>
74
75 #include <asm/mach/mmc.h>
76 #include <mach/board.h>
77 #include <mach/cpu.h>
78 #include <mach/at91_mci.h>
79
80 #define DRIVER_NAME "at91_mci"
81
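/* host->flags bits tracking how far at91_mci_process_next() has got with the current request */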
82 #define FL_SENT_COMMAND (1 << 0)
83 #define FL_SENT_STOP (1 << 1)
84
85 #define AT91_MCI_ERRORS (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE \
86 | AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE \
87 | AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)
88
89 #define at91_mci_read(host, reg) __raw_readl((host)->baseaddr + (reg))
90 #define at91_mci_write(host, reg, val) __raw_writel((val), (host)->baseaddr + (reg))
91
92
93 /*
94  * Low-level per-host state for this driver
95 */
96 struct at91mci_host
97 {
98 struct mmc_host *mmc;
99 struct mmc_command *cmd;
100 struct mmc_request *request;
101
102 void __iomem *baseaddr;
103 int irq;
104
105 struct at91_mmc_data *board;
106 int present;
107
108 struct clk *mci_clk;
109
110 /*
111 * Flag indicating when the command has been sent. This is used to
112 * work out whether or not to send the stop
113 */
114 unsigned int flags;
115 /* flag for current bus settings */
116 u32 bus_mode;
117
118 /* DMA buffer used for transmitting */
119 unsigned int* buffer;
120 dma_addr_t physical_address;
121 unsigned int total_length;
122
123 	/* Index of the next scatterlist entry to complete (already queued to the PDC but not yet unmapped) */
124 int in_use_index;
125
126 	/* Index of the next scatterlist entry to queue to the PDC */
127 int transfer_index;
128
129 /* Timer for timeouts */
130 struct timer_list timer;
131 };
132
133 /*
134 * Reset the controller and restore most of the state
135 */
136 static void at91_reset_host(struct at91mci_host *host)
137 {
138 unsigned long flags;
139 u32 mr;
140 u32 sdcr;
141 u32 dtor;
142 u32 imr;
143
144 local_irq_save(flags);
145 imr = at91_mci_read(host, AT91_MCI_IMR);
146
147 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
148
149 /* save current state */
150 mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff;
151 sdcr = at91_mci_read(host, AT91_MCI_SDCR);
152 dtor = at91_mci_read(host, AT91_MCI_DTOR);
153
154 /* reset the controller */
155 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
156
157 /* restore state */
158 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
159 at91_mci_write(host, AT91_MCI_MR, mr);
160 at91_mci_write(host, AT91_MCI_SDCR, sdcr);
161 at91_mci_write(host, AT91_MCI_DTOR, dtor);
162 at91_mci_write(host, AT91_MCI_IER, imr);
163
164 /* make sure sdio interrupts will fire */
165 at91_mci_read(host, AT91_MCI_SR);
166
167 local_irq_restore(flags);
168 }
169
170 static void at91_timeout_timer(unsigned long data)
171 {
172 struct at91mci_host *host;
173
174 host = (struct at91mci_host *)data;
175
176 if (host->request) {
177 dev_err(host->mmc->parent, "Timeout waiting end of packet\n");
178
179 if (host->cmd && host->cmd->data) {
180 host->cmd->data->error = -ETIMEDOUT;
181 } else {
182 if (host->cmd)
183 host->cmd->error = -ETIMEDOUT;
184 else
185 host->request->cmd->error = -ETIMEDOUT;
186 }
187
188 at91_reset_host(host);
189 mmc_request_done(host->mmc, host->request);
190 }
191 }
192
193 /*
194 * Copy from sg to a dma block - used for transfers
195 */
196 static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
197 {
198 unsigned int len, i, size;
199 unsigned *dmabuf = host->buffer;
200
201 size = data->blksz * data->blocks;
202 len = data->sg_len;
203
204 /* AT91SAM926[0/3] Data Write Operation and number of bytes erratum */
205 if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
206 if (host->total_length == 12)
207 memset(dmabuf, 0, 12);
208
209 /*
210 * Just loop through all entries. Size might not
211 * be the entire list though so make sure that
212 * we do not transfer too much.
213 */
214 for (i = 0; i < len; i++) {
215 struct scatterlist *sg;
216 int amount;
217 unsigned int *sgbuffer;
218
219 sg = &data->sg[i];
220
221 sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
222 amount = min(size, sg->length);
223 size -= amount;
224
225 if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */
226 int index;
227
228 for (index = 0; index < (amount / 4); index++)
229 *dmabuf++ = swab32(sgbuffer[index]);
230 } else {
231 memcpy(dmabuf, sgbuffer, amount);
232 dmabuf += amount;
233 }
234
235 kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);
236
237 if (size == 0)
238 break;
239 }
240
241 /*
242 * Check that we didn't get a request to transfer
243 * more data than can fit into the SG list.
244 */
245 BUG_ON(size != 0);
246 }
247
248 /*
249 * Prepare a dma read
250 */
251 static void at91_mci_pre_dma_read(struct at91mci_host *host)
252 {
253 int i;
254 struct scatterlist *sg;
255 struct mmc_command *cmd;
256 struct mmc_data *data;
257
258 pr_debug("pre dma read\n");
259
260 cmd = host->cmd;
261 if (!cmd) {
262 pr_debug("no command\n");
263 return;
264 }
265
266 data = cmd->data;
267 if (!data) {
268 pr_debug("no data\n");
269 return;
270 }
271
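	/*
	 * The PDC has a "current" and a "next" receive buffer register pair
	 * (RPR/RCR and RNPR/RNCR); try to prime both from the scatterlist so
	 * reception can continue without a gap between buffers.
	 */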
272 for (i = 0; i < 2; i++) {
273 /* nothing left to transfer */
274 if (host->transfer_index >= data->sg_len) {
275 pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
276 break;
277 }
278
279 /* Check to see if this needs filling */
280 if (i == 0) {
281 if (at91_mci_read(host, ATMEL_PDC_RCR) != 0) {
282 pr_debug("Transfer active in current\n");
283 continue;
284 }
285 }
286 else {
287 if (at91_mci_read(host, ATMEL_PDC_RNCR) != 0) {
288 pr_debug("Transfer active in next\n");
289 continue;
290 }
291 }
292
293 /* Setup the next transfer */
294 pr_debug("Using transfer index %d\n", host->transfer_index);
295
296 sg = &data->sg[host->transfer_index++];
297 pr_debug("sg = %p\n", sg);
298
299 sg->dma_address = dma_map_page(NULL, sg_page(sg), sg->offset, sg->length, DMA_FROM_DEVICE);
300
301 pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);
302
303 if (i == 0) {
304 at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address);
305 at91_mci_write(host, ATMEL_PDC_RCR, (data->blksz & 0x3) ? sg->length : sg->length / 4);
306 }
307 else {
308 at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address);
309 at91_mci_write(host, ATMEL_PDC_RNCR, (data->blksz & 0x3) ? sg->length : sg->length / 4);
310 }
311 }
312
313 pr_debug("pre dma read done\n");
314 }
315
316 /*
317 * Handle after a dma read
318 */
319 static void at91_mci_post_dma_read(struct at91mci_host *host)
320 {
321 struct mmc_command *cmd;
322 struct mmc_data *data;
323
324 pr_debug("post dma read\n");
325
326 cmd = host->cmd;
327 if (!cmd) {
328 pr_debug("no command\n");
329 return;
330 }
331
332 data = cmd->data;
333 if (!data) {
334 pr_debug("no data\n");
335 return;
336 }
337
338 while (host->in_use_index < host->transfer_index) {
339 struct scatterlist *sg;
340
341 pr_debug("finishing index %d\n", host->in_use_index);
342
343 sg = &data->sg[host->in_use_index++];
344
345 pr_debug("Unmapping page %08X\n", sg->dma_address);
346
347 dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);
348
349 if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */
350 unsigned int *buffer;
351 int index;
352
353 /* Swap the contents of the buffer */
354 buffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
355 pr_debug("buffer = %p, length = %d\n", buffer, sg->length);
356
357 for (index = 0; index < (sg->length / 4); index++)
358 buffer[index] = swab32(buffer[index]);
359
360 kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
361 }
362
363 flush_dcache_page(sg_page(sg));
364
365 data->bytes_xfered += sg->length;
366 }
367
368 /* Is there another transfer to trigger? */
369 if (host->transfer_index < data->sg_len)
370 at91_mci_pre_dma_read(host);
371 else {
372 at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_ENDRX);
373 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
374 }
375
376 pr_debug("post dma read done\n");
377 }
378
379 /*
380 * Handle transmitted data
381 */
382 static void at91_mci_handle_transmitted(struct at91mci_host *host)
383 {
384 struct mmc_command *cmd;
385 struct mmc_data *data;
386
387 pr_debug("Handling the transmit\n");
388
389 /* Disable the transfer */
390 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
391
392 /* Now wait for cmd ready */
393 at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE);
394
395 cmd = host->cmd;
396 if (!cmd) return;
397
398 data = cmd->data;
399 if (!data) return;
400
401 if (cmd->data->blocks > 1) {
402 pr_debug("multiple write : wait for BLKE...\n");
403 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
404 } else
405 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
406 }
407
408 /*
409  * Update the bytes transferred count during a write operation
410 */
411 static void at91_mci_update_bytes_xfered(struct at91mci_host *host)
412 {
413 struct mmc_data *data;
414
415 /* always deal with the effective request (and not the current cmd) */
416
417 if (host->request->cmd && host->request->cmd->error != 0)
418 return;
419
420 if (host->request->data) {
421 data = host->request->data;
422 if (data->flags & MMC_DATA_WRITE) {
423 /* card is in IDLE mode now */
424 pr_debug("-> bytes_xfered %d, total_length = %d\n",
425 data->bytes_xfered, host->total_length);
426 data->bytes_xfered = data->blksz * data->blocks;
427 }
428 }
429 }
430
431
432 /* Handle CMDRDY: the command has been sent.  Return 1 if this step of the request is complete, 0 if we still have to wait for further interrupts. */
433 static int at91_mci_handle_cmdrdy(struct at91mci_host *host)
434 {
435 if (!host->cmd)
436 return 1;
437 else if (!host->cmd->data) {
438 if (host->flags & FL_SENT_STOP) {
439 		/* After a multi-block write we must wait for NOTBUSY */
440 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
441 } else return 1;
442 } else if (host->cmd->data->flags & MMC_DATA_WRITE) {
443 	/* After sending the multi-block-write command, start the DMA transfer */
444 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE | AT91_MCI_BLKE);
445 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
446 }
447
448 /* command not completed, have to wait */
449 return 0;
450 }
451
452
453 /*
454 * Enable the controller
455 */
456 static void at91_mci_enable(struct at91mci_host *host)
457 {
458 unsigned int mr;
459
460 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
461 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
462 at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
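	/*
	 * PDC mode plus conservative initial timing values (0x34a); the real
	 * clock divider is programmed later by at91_mci_set_ios().
	 */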
463 mr = AT91_MCI_PDCMODE | 0x34a;
464
465 if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
466 mr |= AT91_MCI_RDPROOF | AT91_MCI_WRPROOF;
467
468 at91_mci_write(host, AT91_MCI_MR, mr);
469
470 	/* use Slot A or B (only one at a time) */
471 at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b);
472 }
473
474 /*
475 * Disable the controller
476 */
477 static void at91_mci_disable(struct at91mci_host *host)
478 {
479 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
480 }
481
482 /*
483 * Send a command
484 */
485 static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
486 {
487 unsigned int cmdr, mr;
488 unsigned int block_length;
489 struct mmc_data *data = cmd->data;
490
491 unsigned int blocks;
492 unsigned int ier = 0;
493
494 host->cmd = cmd;
495
496 /* Needed for leaving busy state before CMD1 */
497 if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
498 pr_debug("Clearing timeout\n");
499 at91_mci_write(host, AT91_MCI_ARGR, 0);
500 at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
501 while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
502 /* spin */
503 pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
504 }
505 }
506
507 cmdr = cmd->opcode;
508
509 if (mmc_resp_type(cmd) == MMC_RSP_NONE)
510 cmdr |= AT91_MCI_RSPTYP_NONE;
511 else {
512 		/* if a response is expected then allow maximum response latency */
513 cmdr |= AT91_MCI_MAXLAT;
514 /* set 136 bit response for R2, 48 bit response otherwise */
515 if (mmc_resp_type(cmd) == MMC_RSP_R2)
516 cmdr |= AT91_MCI_RSPTYP_136;
517 else
518 cmdr |= AT91_MCI_RSPTYP_48;
519 }
520
521 if (data) {
522
523 if (cpu_is_at91rm9200() || cpu_is_at91sam9261()) {
524 if (data->blksz & 0x3) {
525 pr_debug("Unsupported block size\n");
526 cmd->error = -EINVAL;
527 mmc_request_done(host->mmc, host->request);
528 return;
529 }
530 if (data->flags & MMC_DATA_STREAM) {
531 pr_debug("Stream commands not supported\n");
532 cmd->error = -EINVAL;
533 mmc_request_done(host->mmc, host->request);
534 return;
535 }
536 }
537
538 block_length = data->blksz;
539 blocks = data->blocks;
540
541 /* always set data start - also set direction flag for read */
542 if (data->flags & MMC_DATA_READ)
543 cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
544 else if (data->flags & MMC_DATA_WRITE)
545 cmdr |= AT91_MCI_TRCMD_START;
546
547 if (data->flags & MMC_DATA_STREAM)
548 cmdr |= AT91_MCI_TRTYP_STREAM;
549 if (data->blocks > 1)
550 cmdr |= AT91_MCI_TRTYP_MULTIPLE;
551 }
552 else {
553 block_length = 0;
554 blocks = 0;
555 }
556
557 if (host->flags & FL_SENT_STOP)
558 cmdr |= AT91_MCI_TRCMD_STOP;
559
560 if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
561 cmdr |= AT91_MCI_OPDCMD;
562
563 /*
564 * Set the arguments and send the command
565 */
566 pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n",
567 cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));
568
569 if (!data) {
570 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS | ATMEL_PDC_RXTDIS);
571 at91_mci_write(host, ATMEL_PDC_RPR, 0);
572 at91_mci_write(host, ATMEL_PDC_RCR, 0);
573 at91_mci_write(host, ATMEL_PDC_RNPR, 0);
574 at91_mci_write(host, ATMEL_PDC_RNCR, 0);
575 at91_mci_write(host, ATMEL_PDC_TPR, 0);
576 at91_mci_write(host, ATMEL_PDC_TCR, 0);
577 at91_mci_write(host, ATMEL_PDC_TNPR, 0);
578 at91_mci_write(host, ATMEL_PDC_TNCR, 0);
579 ier = AT91_MCI_CMDRDY;
580 } else {
581 /* zero block length and PDC mode */
582 mr = at91_mci_read(host, AT91_MCI_MR) & 0x5fff;
583 mr |= (data->blksz & 0x3) ? AT91_MCI_PDCFBYTE : 0;
584 mr |= (block_length << 16);
585 mr |= AT91_MCI_PDCMODE;
586 at91_mci_write(host, AT91_MCI_MR, mr);
587
588 if (!(cpu_is_at91rm9200() || cpu_is_at91sam9261()))
589 at91_mci_write(host, AT91_MCI_BLKR,
590 AT91_MCI_BLKR_BCNT(blocks) |
591 AT91_MCI_BLKR_BLKLEN(block_length));
592
593 /*
594 * Disable the PDC controller
595 */
596 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
597
598 if (cmdr & AT91_MCI_TRCMD_START) {
599 data->bytes_xfered = 0;
600 host->transfer_index = 0;
601 host->in_use_index = 0;
602 if (cmdr & AT91_MCI_TRDIR) {
603 /*
604 * Handle a read
605 */
606 host->buffer = NULL;
607 host->total_length = 0;
608
609 at91_mci_pre_dma_read(host);
610 ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
611 }
612 else {
613 /*
614 * Handle a write
615 */
616 host->total_length = block_length * blocks;
617 /*
618 * AT91SAM926[0/3] Data Write Operation and
619 * number of bytes erratum
620 */
621 if (cpu_is_at91sam9260 () || cpu_is_at91sam9263())
622 if (host->total_length < 12)
623 host->total_length = 12;
624 host->buffer = dma_alloc_coherent(NULL,
625 host->total_length,
626 &host->physical_address, GFP_KERNEL);
627
628 at91_mci_sg_to_dma(host, data);
629
630 pr_debug("Transmitting %d bytes\n", host->total_length);
631
632 at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
633 at91_mci_write(host, ATMEL_PDC_TCR, (data->blksz & 0x3) ?
634 host->total_length : host->total_length / 4);
635
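				/*
				 * Note: the PDC transmitter itself is only
				 * enabled once CMDRDY fires, in
				 * at91_mci_handle_cmdrdy().
				 */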
636 ier = AT91_MCI_CMDRDY;
637 }
638 }
639 }
640
641 /*
642 * Send the command and then enable the PDC - not the other way round as
643 * the data sheet says
644 */
645
646 at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
647 at91_mci_write(host, AT91_MCI_CMDR, cmdr);
648
649 if (cmdr & AT91_MCI_TRCMD_START) {
650 if (cmdr & AT91_MCI_TRDIR)
651 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
652 }
653
654 /* Enable selected interrupts */
655 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier);
656 }
657
658 /*
659 * Process the next step in the request
660 */
661 static void at91_mci_process_next(struct at91mci_host *host)
662 {
663 if (!(host->flags & FL_SENT_COMMAND)) {
664 host->flags |= FL_SENT_COMMAND;
665 at91_mci_send_command(host, host->request->cmd);
666 }
667 else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
668 host->flags |= FL_SENT_STOP;
669 at91_mci_send_command(host, host->request->stop);
670 } else {
671 del_timer(&host->timer);
672 /* the at91rm9200 mci controller hangs after some transfers,
673 * and the workaround is to reset it after each transfer.
674 */
675 if (cpu_is_at91rm9200())
676 at91_reset_host(host);
677 mmc_request_done(host->mmc, host->request);
678 }
679 }
680
681 /*
682 * Handle a command that has been completed
683 */
684 static void at91_mci_completed_command(struct at91mci_host *host, unsigned int status)
685 {
686 struct mmc_command *cmd = host->cmd;
687 struct mmc_data *data = cmd->data;
688
689 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));
690
691 cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
692 cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
693 cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
694 cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));
695
696 if (host->buffer) {
697 dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address);
698 host->buffer = NULL;
699 }
700
701 pr_debug("Status = %08X/%08x [%08X %08X %08X %08X]\n",
702 status, at91_mci_read(host, AT91_MCI_SR),
703 cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
704
705 if (status & AT91_MCI_ERRORS) {
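		/*
		 * Commands whose response carries no CRC (e.g. R3) still make
		 * the controller flag RCRCE, so ignore the CRC error for them.
		 */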
706 if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) {
707 cmd->error = 0;
708 }
709 else {
710 if (status & (AT91_MCI_DTOE | AT91_MCI_DCRCE)) {
711 if (data) {
712 if (status & AT91_MCI_DTOE)
713 data->error = -ETIMEDOUT;
714 else if (status & AT91_MCI_DCRCE)
715 data->error = -EILSEQ;
716 }
717 } else {
718 if (status & AT91_MCI_RTOE)
719 cmd->error = -ETIMEDOUT;
720 else if (status & AT91_MCI_RCRCE)
721 cmd->error = -EILSEQ;
722 else
723 cmd->error = -EIO;
724 }
725
726 pr_debug("Error detected and set to %d/%d (cmd = %d, retries = %d)\n",
727 cmd->error, data ? data->error : 0,
728 cmd->opcode, cmd->retries);
729 }
730 }
731 else
732 cmd->error = 0;
733
734 at91_mci_process_next(host);
735 }
736
737 /*
738 * Handle an MMC request
739 */
740 static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
741 {
742 struct at91mci_host *host = mmc_priv(mmc);
743 host->request = mrq;
744 host->flags = 0;
745
746 mod_timer(&host->timer, jiffies + HZ);
747
748 at91_mci_process_next(host);
749 }
750
751 /*
752 * Set the IOS
753 */
754 static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
755 {
756 int clkdiv;
757 struct at91mci_host *host = mmc_priv(mmc);
758 unsigned long at91_master_clock = clk_get_rate(host->mci_clk);
759
760 host->bus_mode = ios->bus_mode;
761
762 if (ios->clock == 0) {
763 /* Disable the MCI controller */
764 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS);
765 clkdiv = 0;
766 }
767 else {
768 /* Enable the MCI controller */
769 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
770
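		/*
		 * The card clock is MCK / (2 * (CLKDIV + 1)).  When MCK does
		 * not divide evenly, round the divider up so the card clock
		 * never exceeds the requested rate.
		 */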
771 if ((at91_master_clock % (ios->clock * 2)) == 0)
772 clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
773 else
774 clkdiv = (at91_master_clock / ios->clock) / 2;
775
776 pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
777 at91_master_clock / (2 * (clkdiv + 1)));
778 }
779 if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
780 pr_debug("MMC: Setting controller bus width to 4\n");
781 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
782 }
783 else {
784 pr_debug("MMC: Setting controller bus width to 1\n");
785 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
786 }
787
788 /* Set the clock divider */
789 at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);
790
791 /* maybe switch power to the card */
792 if (host->board->vcc_pin) {
793 switch (ios->power_mode) {
794 case MMC_POWER_OFF:
795 gpio_set_value(host->board->vcc_pin, 0);
796 break;
797 case MMC_POWER_UP:
798 gpio_set_value(host->board->vcc_pin, 1);
799 break;
800 case MMC_POWER_ON:
801 break;
802 default:
803 WARN_ON(1);
804 }
805 }
806 }
807
808 /*
809 * Handle an interrupt
810 */
811 static irqreturn_t at91_mci_irq(int irq, void *devid)
812 {
813 struct at91mci_host *host = devid;
814 int completed = 0;
815 unsigned int int_status, int_mask;
816
817 int_status = at91_mci_read(host, AT91_MCI_SR);
818 int_mask = at91_mci_read(host, AT91_MCI_IMR);
819
820 pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask,
821 int_status & int_mask);
822
823 int_status = int_status & int_mask;
824
825 if (int_status & AT91_MCI_ERRORS) {
826 completed = 1;
827
828 if (int_status & AT91_MCI_UNRE)
829 pr_debug("MMC: Underrun error\n");
830 if (int_status & AT91_MCI_OVRE)
831 pr_debug("MMC: Overrun error\n");
832 if (int_status & AT91_MCI_DTOE)
833 pr_debug("MMC: Data timeout\n");
834 if (int_status & AT91_MCI_DCRCE)
835 pr_debug("MMC: CRC error in data\n");
836 if (int_status & AT91_MCI_RTOE)
837 pr_debug("MMC: Response timeout\n");
838 if (int_status & AT91_MCI_RENDE)
839 pr_debug("MMC: Response end bit error\n");
840 if (int_status & AT91_MCI_RCRCE)
841 pr_debug("MMC: Response CRC error\n");
842 if (int_status & AT91_MCI_RDIRE)
843 pr_debug("MMC: Response direction error\n");
844 if (int_status & AT91_MCI_RINDE)
845 pr_debug("MMC: Response index error\n");
846 } else {
847 /* Only continue processing if no errors */
848
849 if (int_status & AT91_MCI_TXBUFE) {
850 pr_debug("TX buffer empty\n");
851 at91_mci_handle_transmitted(host);
852 }
853
854 if (int_status & AT91_MCI_ENDRX) {
855 pr_debug("ENDRX\n");
856 at91_mci_post_dma_read(host);
857 }
858
859 if (int_status & AT91_MCI_RXBUFF) {
860 pr_debug("RX buffer full\n");
861 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
862 at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_RXBUFF | AT91_MCI_ENDRX);
863 completed = 1;
864 }
865
866 if (int_status & AT91_MCI_ENDTX)
867 pr_debug("Transmit has ended\n");
868
869 if (int_status & AT91_MCI_NOTBUSY) {
870 pr_debug("Card is ready\n");
871 at91_mci_update_bytes_xfered(host);
872 completed = 1;
873 }
874
875 if (int_status & AT91_MCI_DTIP)
876 pr_debug("Data transfer in progress\n");
877
878 if (int_status & AT91_MCI_BLKE) {
879 pr_debug("Block transfer has ended\n");
880 if (host->request->data && host->request->data->blocks > 1) {
881 /* multi block write : complete multi write
882 * command and send stop */
883 completed = 1;
884 } else {
885 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
886 }
887 }
888
889 if (int_status & AT91_MCI_SDIOIRQA)
890 mmc_signal_sdio_irq(host->mmc);
891
892 if (int_status & AT91_MCI_SDIOIRQB)
893 mmc_signal_sdio_irq(host->mmc);
894
895 if (int_status & AT91_MCI_TXRDY)
896 pr_debug("Ready to transmit\n");
897
898 if (int_status & AT91_MCI_RXRDY)
899 pr_debug("Ready to receive\n");
900
901 if (int_status & AT91_MCI_CMDRDY) {
902 pr_debug("Command ready\n");
903 completed = at91_mci_handle_cmdrdy(host);
904 }
905 }
906
907 if (completed) {
908 pr_debug("Completed command\n");
909 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));
910 at91_mci_completed_command(host, int_status);
911 } else
912 at91_mci_write(host, AT91_MCI_IDR, int_status & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));
913
914 return IRQ_HANDLED;
915 }
916
917 static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
918 {
919 struct at91mci_host *host = _host;
920 int present = !gpio_get_value(irq_to_gpio(irq));
921
922 /*
923 * we expect this irq on both insert and remove,
924 * and use a short delay to debounce.
925 */
926 if (present != host->present) {
927 host->present = present;
928 pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
929 present ? "insert" : "remove");
930 if (!present) {
931 pr_debug("****** Resetting SD-card bus width ******\n");
932 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
933 }
934 mmc_detect_change(host->mmc, msecs_to_jiffies(100));
935 }
936 return IRQ_HANDLED;
937 }
938
939 static int at91_mci_get_ro(struct mmc_host *mmc)
940 {
941 struct at91mci_host *host = mmc_priv(mmc);
942
943 if (host->board->wp_pin)
944 return !!gpio_get_value(host->board->wp_pin);
945 /*
946 * Board doesn't support read only detection; let the mmc core
947 * decide what to do.
948 */
949 return -ENOSYS;
950 }
951
952 static void at91_mci_enable_sdio_irq(struct mmc_host *mmc, int enable)
953 {
954 struct at91mci_host *host = mmc_priv(mmc);
955
956 pr_debug("%s: sdio_irq %c : %s\n", mmc_hostname(host->mmc),
957 host->board->slot_b ? 'B':'A', enable ? "enable" : "disable");
958 at91_mci_write(host, enable ? AT91_MCI_IER : AT91_MCI_IDR,
959 host->board->slot_b ? AT91_MCI_SDIOIRQB : AT91_MCI_SDIOIRQA);
960
961 }
962
963 static const struct mmc_host_ops at91_mci_ops = {
964 .request = at91_mci_request,
965 .set_ios = at91_mci_set_ios,
966 .get_ro = at91_mci_get_ro,
967 .enable_sdio_irq = at91_mci_enable_sdio_irq,
968 };
969
970 /*
971 * Probe for the device
972 */
973 static int __init at91_mci_probe(struct platform_device *pdev)
974 {
975 struct mmc_host *mmc;
976 struct at91mci_host *host;
977 struct resource *res;
978 int ret;
979
980 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
981 if (!res)
982 return -ENXIO;
983
984 if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
985 return -EBUSY;
986
987 mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
988 if (!mmc) {
989 ret = -ENOMEM;
990 dev_dbg(&pdev->dev, "couldn't allocate mmc host\n");
991 goto fail6;
992 }
993
994 mmc->ops = &at91_mci_ops;
995 mmc->f_min = 375000;
996 mmc->f_max = 25000000;
997 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
998 mmc->caps = MMC_CAP_SDIO_IRQ;
999
1000 mmc->max_blk_size = 4095;
1001 mmc->max_blk_count = mmc->max_req_size;
1002
1003 host = mmc_priv(mmc);
1004 host->mmc = mmc;
1005 host->buffer = NULL;
1006 host->bus_mode = 0;
1007 host->board = pdev->dev.platform_data;
1008 if (host->board->wire4) {
1009 if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
1010 mmc->caps |= MMC_CAP_4_BIT_DATA;
1011 else
1012 dev_warn(&pdev->dev, "4 wire bus mode not supported"
1013 " - using 1 wire\n");
1014 }
1015
1016 /*
1017 * Reserve GPIOs ... board init code makes sure these pins are set
1018 * up as GPIOs with the right direction (input, except for vcc)
1019 */
1020 if (host->board->det_pin) {
1021 ret = gpio_request(host->board->det_pin, "mmc_detect");
1022 if (ret < 0) {
1023 dev_dbg(&pdev->dev, "couldn't claim card detect pin\n");
1024 goto fail5;
1025 }
1026 }
1027 if (host->board->wp_pin) {
1028 ret = gpio_request(host->board->wp_pin, "mmc_wp");
1029 if (ret < 0) {
1030 dev_dbg(&pdev->dev, "couldn't claim wp sense pin\n");
1031 goto fail4;
1032 }
1033 }
1034 if (host->board->vcc_pin) {
1035 ret = gpio_request(host->board->vcc_pin, "mmc_vcc");
1036 if (ret < 0) {
1037 dev_dbg(&pdev->dev, "couldn't claim vcc switch pin\n");
1038 goto fail3;
1039 }
1040 }
1041
1042 /*
1043 * Get Clock
1044 */
1045 host->mci_clk = clk_get(&pdev->dev, "mci_clk");
1046 if (IS_ERR(host->mci_clk)) {
1047 ret = -ENODEV;
1048 dev_dbg(&pdev->dev, "no mci_clk?\n");
1049 goto fail2;
1050 }
1051
1052 /*
1053 * Map I/O region
1054 */
1055 host->baseaddr = ioremap(res->start, res->end - res->start + 1);
1056 if (!host->baseaddr) {
1057 ret = -ENOMEM;
1058 goto fail1;
1059 }
1060
1061 /*
1062 * Reset hardware
1063 */
1064 clk_enable(host->mci_clk); /* Enable the peripheral clock */
1065 at91_mci_disable(host);
1066 at91_mci_enable(host);
1067
1068 /*
1069 * Allocate the MCI interrupt
1070 */
1071 host->irq = platform_get_irq(pdev, 0);
1072 ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED,
1073 mmc_hostname(mmc), host);
1074 if (ret) {
1075 dev_dbg(&pdev->dev, "request MCI interrupt failed\n");
1076 goto fail0;
1077 }
1078
1079 platform_set_drvdata(pdev, mmc);
1080
1081 /*
1082 * Add host to MMC layer
1083 */
1084 if (host->board->det_pin) {
1085 host->present = !gpio_get_value(host->board->det_pin);
1086 }
1087 else
1088 host->present = -1;
1089
1090 mmc_add_host(mmc);
1091
1092 setup_timer(&host->timer, at91_timeout_timer, (unsigned long)host);
1093
1094 /*
1095 * monitor card insertion/removal if we can
1096 */
1097 if (host->board->det_pin) {
1098 ret = request_irq(gpio_to_irq(host->board->det_pin),
1099 at91_mmc_det_irq, 0, mmc_hostname(mmc), host);
1100 if (ret)
1101 dev_warn(&pdev->dev, "request MMC detect irq failed\n");
1102 else
1103 device_init_wakeup(&pdev->dev, 1);
1104 }
1105
1106 pr_debug("Added MCI driver\n");
1107
1108 return 0;
1109
1110 fail0:
1111 clk_disable(host->mci_clk);
1112 iounmap(host->baseaddr);
1113 fail1:
1114 clk_put(host->mci_clk);
1115 fail2:
1116 if (host->board->vcc_pin)
1117 gpio_free(host->board->vcc_pin);
1118 fail3:
1119 if (host->board->wp_pin)
1120 gpio_free(host->board->wp_pin);
1121 fail4:
1122 if (host->board->det_pin)
1123 gpio_free(host->board->det_pin);
1124 fail5:
1125 mmc_free_host(mmc);
1126 fail6:
1127 release_mem_region(res->start, res->end - res->start + 1);
1128 dev_err(&pdev->dev, "probe failed, err %d\n", ret);
1129 return ret;
1130 }
1131
1132 /*
1133 * Remove a device
1134 */
1135 static int __exit at91_mci_remove(struct platform_device *pdev)
1136 {
1137 struct mmc_host *mmc = platform_get_drvdata(pdev);
1138 struct at91mci_host *host;
1139 struct resource *res;
1140
1141 if (!mmc)
1142 return -1;
1143
1144 host = mmc_priv(mmc);
1145
1146 if (host->board->det_pin) {
1147 if (device_can_wakeup(&pdev->dev))
1148 free_irq(gpio_to_irq(host->board->det_pin), host);
1149 device_init_wakeup(&pdev->dev, 0);
1150 gpio_free(host->board->det_pin);
1151 }
1152
1153 at91_mci_disable(host);
1154 del_timer_sync(&host->timer);
1155 mmc_remove_host(mmc);
1156 free_irq(host->irq, host);
1157
1158 clk_disable(host->mci_clk); /* Disable the peripheral clock */
1159 clk_put(host->mci_clk);
1160
1161 if (host->board->vcc_pin)
1162 gpio_free(host->board->vcc_pin);
1163 if (host->board->wp_pin)
1164 gpio_free(host->board->wp_pin);
1165
1166 iounmap(host->baseaddr);
1167 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1168 release_mem_region(res->start, res->end - res->start + 1);
1169
1170 mmc_free_host(mmc);
1171 platform_set_drvdata(pdev, NULL);
1172 pr_debug("MCI Removed\n");
1173
1174 return 0;
1175 }
1176
1177 #ifdef CONFIG_PM
1178 static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
1179 {
1180 struct mmc_host *mmc = platform_get_drvdata(pdev);
1181 struct at91mci_host *host = mmc_priv(mmc);
1182 int ret = 0;
1183
1184 if (host->board->det_pin && device_may_wakeup(&pdev->dev))
1185 enable_irq_wake(host->board->det_pin);
1186
1187 if (mmc)
1188 ret = mmc_suspend_host(mmc, state);
1189
1190 return ret;
1191 }
1192
1193 static int at91_mci_resume(struct platform_device *pdev)
1194 {
1195 struct mmc_host *mmc = platform_get_drvdata(pdev);
1196 struct at91mci_host *host = mmc_priv(mmc);
1197 int ret = 0;
1198
1199 if (host->board->det_pin && device_may_wakeup(&pdev->dev))
1200 disable_irq_wake(host->board->det_pin);
1201
1202 if (mmc)
1203 ret = mmc_resume_host(mmc);
1204
1205 return ret;
1206 }
1207 #else
1208 #define at91_mci_suspend NULL
1209 #define at91_mci_resume NULL
1210 #endif
1211
1212 static struct platform_driver at91_mci_driver = {
1213 .remove = __exit_p(at91_mci_remove),
1214 .suspend = at91_mci_suspend,
1215 .resume = at91_mci_resume,
1216 .driver = {
1217 .name = DRIVER_NAME,
1218 .owner = THIS_MODULE,
1219 },
1220 };
1221
1222 static int __init at91_mci_init(void)
1223 {
1224 return platform_driver_probe(&at91_mci_driver, at91_mci_probe);
1225 }
1226
1227 static void __exit at91_mci_exit(void)
1228 {
1229 platform_driver_unregister(&at91_mci_driver);
1230 }
1231
1232 module_init(at91_mci_init);
1233 module_exit(at91_mci_exit);
1234
1235 MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
1236 MODULE_AUTHOR("Nick Randell");
1237 MODULE_LICENSE("GPL");
1238 MODULE_ALIAS("platform:at91_mci");