1 /*
2 * linux/drivers/mmc/host/at91_mci.c - ATMEL AT91 MCI Driver
3 *
4 * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
5 *
6 * Copyright (C) 2006 Malcolm Noyes
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13 /*
14 This is the AT91 MCI driver. It has been tested with both MMC cards
15 and SD cards. Boards that provide a write-protect pin are supported.
16 The CCAT91SBC001 board does not support SD cards.
17
18 The three entry points are at91_mci_request, at91_mci_set_ios
19 and at91_mci_get_ro.
20
21 SET IOS
22 This configures the device to put it into the correct mode and clock speed
23 required.
24
25 MCI REQUEST
26 MCI request processes the commands sent in the mmc_request structure. This
27 can consist of a processing command and a stop command in the case of
28 multiple block transfers.
29
30 There are three main types of request: commands, reads and writes.
31
32 Commands are straightforward. The command is submitted to the controller and
33 the request function returns. When the controller generates an interrupt to
34 indicate that the command has finished, the response to the command is read
35 and mmc_request_done() is called to end the request.
36
37 Reads and writes work in a similar manner to normal commands but use the PDC (DMA)
38 controller to manage the transfers.
39
40 A read is done from the controller directly to the scatterlist passed in from the request.
41 Due to a bug in the AT91RM9200 controller, when a read is completed, all the words are byte
42 swapped in the scatterlist buffers. AT91SAM926x devices are not affected by this bug.
43
44 The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY
45
46 A write is slightly different in that the bytes to write are first copied from the
47 scatterlist into a single DMA memory buffer (in case the source buffer is read-only).
48 The entire write is then performed from this single DMA memory buffer.
49
50 The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY
51
52 GET RO
53 Gets the status of the write protect pin, if available.
54 */
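
/*
 * Board hookup example (a sketch only, not part of this driver): on AT91
 * platforms the board setup code normally describes the MCI slot with a
 * struct at91_mmc_data, where slot_b selects slot A or B, wire4 enables the
 * 4-bit bus, and det_pin/wp_pin/vcc_pin are the optional card-detect,
 * write-protect and power-switch GPIOs.  It is then registered through the
 * SoC support code, typically with at91_add_device_mmc().  The GPIO numbers
 * below are purely illustrative.
 *
 *	static struct at91_mmc_data __initdata board_mmc_data = {
 *		.slot_b		= 0,
 *		.wire4		= 1,
 *		.det_pin	= AT91_PIN_PC9,
 *		.wp_pin		= AT91_PIN_PC4,
 *	};
 *
 *	at91_add_device_mmc(0, &board_mmc_data);
 */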
55
56 #include <linux/module.h>
57 #include <linux/moduleparam.h>
58 #include <linux/init.h>
59 #include <linux/ioport.h>
60 #include <linux/platform_device.h>
61 #include <linux/interrupt.h>
62 #include <linux/blkdev.h>
63 #include <linux/delay.h>
64 #include <linux/err.h>
65 #include <linux/dma-mapping.h>
66 #include <linux/clk.h>
67 #include <linux/atmel_pdc.h>
68
69 #include <linux/mmc/host.h>
70
71 #include <asm/io.h>
72 #include <asm/irq.h>
73 #include <asm/gpio.h>
74
75 #include <asm/mach/mmc.h>
76 #include <mach/board.h>
77 #include <mach/cpu.h>
78 #include <mach/at91_mci.h>
79
80 #define DRIVER_NAME "at91_mci"
81
82 #define FL_SENT_COMMAND (1 << 0)
83 #define FL_SENT_STOP (1 << 1)
84
85 #define AT91_MCI_ERRORS (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE \
86 | AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE \
87 | AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)
88
89 #define at91_mci_read(host, reg) __raw_readl((host)->baseaddr + (reg))
90 #define at91_mci_write(host, reg, val) __raw_writel((val), (host)->baseaddr + (reg))
91
92
93 /*
94 * Low level type for this driver
95 */
96 struct at91mci_host
97 {
98 struct mmc_host *mmc;
99 struct mmc_command *cmd;
100 struct mmc_request *request;
101
102 void __iomem *baseaddr;
103 int irq;
104
105 struct at91_mmc_data *board;
106 int present;
107
108 struct clk *mci_clk;
109
110 /*
111 * Flag indicating when the command has been sent. This is used to
112 * work out whether or not to send the stop
113 */
114 unsigned int flags;
115 /* flag for current bus settings */
116 u32 bus_mode;
117
118 /* DMA buffer used for transmitting */
119 unsigned int* buffer;
120 dma_addr_t physical_address;
121 unsigned int total_length;
122
123 /* Index of the next scatterlist entry to be post-processed (unmapped) after its DMA completes */
124 int in_use_index;
125
126 /* Index of the next scatterlist entry to be programmed into the PDC */
127 int transfer_index;
128
129 /* Timer for timeouts */
130 struct timer_list timer;
131 };
132
133 /*
134 * Reset the controller and restore most of the state
135 */
136 static void at91_reset_host(struct at91mci_host *host)
137 {
138 unsigned long flags;
139 u32 mr;
140 u32 sdcr;
141 u32 dtor;
142 u32 imr;
143
144 local_irq_save(flags);
145 imr = at91_mci_read(host, AT91_MCI_IMR);
146
147 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
148
149 /* save current state */
150 mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff;
151 sdcr = at91_mci_read(host, AT91_MCI_SDCR);
152 dtor = at91_mci_read(host, AT91_MCI_DTOR);
153
154 /* reset the controller */
155 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
156
157 /* restore state */
158 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
159 at91_mci_write(host, AT91_MCI_MR, mr);
160 at91_mci_write(host, AT91_MCI_SDCR, sdcr);
161 at91_mci_write(host, AT91_MCI_DTOR, dtor);
162 at91_mci_write(host, AT91_MCI_IER, imr);
163
164 /* make sure sdio interrupts will fire */
165 at91_mci_read(host, AT91_MCI_SR);
166
167 local_irq_restore(flags);
168 }
169
170 static void at91_timeout_timer(unsigned long data)
171 {
172 struct at91mci_host *host;
173
174 host = (struct at91mci_host *)data;
175
176 if (host->request) {
177 dev_err(host->mmc->parent, "Timeout waiting end of packet\n");
178
179 if (host->cmd && host->cmd->data) {
180 host->cmd->data->error = -ETIMEDOUT;
181 } else {
182 if (host->cmd)
183 host->cmd->error = -ETIMEDOUT;
184 else
185 host->request->cmd->error = -ETIMEDOUT;
186 }
187
188 at91_reset_host(host);
189 mmc_request_done(host->mmc, host->request);
190 }
191 }
192
193 /*
194 * Copy from sg to a dma block - used for transfers
195 */
196 static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
197 {
198 unsigned int len, i, size;
199 unsigned *dmabuf = host->buffer;
200
201 size = data->blksz * data->blocks;
202 len = data->sg_len;
203
204 /* AT91SAM926[0/3] Data Write Operation and number of bytes erratum */
205 if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
206 if (host->total_length == 12)
207 memset(dmabuf, 0, 12);
208
209 /*
210 * Just loop through all entries. The requested size may be smaller
211 * than the total scatterlist length, so make sure that we do not
212 * transfer too much.
213 */
214 for (i = 0; i < len; i++) {
215 struct scatterlist *sg;
216 int amount;
217 unsigned int *sgbuffer;
218
219 sg = &data->sg[i];
220
221 sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
222 amount = min(size, sg->length);
223 size -= amount;
224
225 if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */
226 int index;
227
228 for (index = 0; index < (amount / 4); index++)
229 *dmabuf++ = swab32(sgbuffer[index]);
230 } else {
231 memcpy(dmabuf, sgbuffer, amount);
232 dmabuf = (unsigned *)((char *)dmabuf + amount); /* advance by bytes, not words */
233 }
234
235 kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);
236
237 if (size == 0)
238 break;
239 }
240
241 /*
242 * Check that we didn't get a request to transfer
243 * more data than can fit into the SG list.
244 */
245 BUG_ON(size != 0);
246 }
247
248 /*
249 * Prepare a dma read
250 */
251 static void at91_mci_pre_dma_read(struct at91mci_host *host)
252 {
253 int i;
254 struct scatterlist *sg;
255 struct mmc_command *cmd;
256 struct mmc_data *data;
257
258 pr_debug("pre dma read\n");
259
260 cmd = host->cmd;
261 if (!cmd) {
262 pr_debug("no command\n");
263 return;
264 }
265
266 data = cmd->data;
267 if (!data) {
268 pr_debug("no data\n");
269 return;
270 }
271
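/*
 * The PDC provides a current and a next buffer pointer pair (RPR/RCR and
 * RNPR/RNCR), so up to two scatterlist entries can be queued per call.
 */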
272 for (i = 0; i < 2; i++) {
273 /* nothing left to transfer */
274 if (host->transfer_index >= data->sg_len) {
275 pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
276 break;
277 }
278
279 /* Check to see if this needs filling */
280 if (i == 0) {
281 if (at91_mci_read(host, ATMEL_PDC_RCR) != 0) {
282 pr_debug("Transfer active in current\n");
283 continue;
284 }
285 }
286 else {
287 if (at91_mci_read(host, ATMEL_PDC_RNCR) != 0) {
288 pr_debug("Transfer active in next\n");
289 continue;
290 }
291 }
292
293 /* Setup the next transfer */
294 pr_debug("Using transfer index %d\n", host->transfer_index);
295
296 sg = &data->sg[host->transfer_index++];
297 pr_debug("sg = %p\n", sg);
298
299 sg->dma_address = dma_map_page(NULL, sg_page(sg), sg->offset, sg->length, DMA_FROM_DEVICE);
300
301 pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);
302
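/* The PDC counter is in bytes when force-byte mode is used (block size not
 * a multiple of 4, see AT91_MCI_PDCFBYTE below); otherwise it counts
 * 32-bit words. */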
303 if (i == 0) {
304 at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address);
305 at91_mci_write(host, ATMEL_PDC_RCR, (data->blksz & 0x3) ? sg->length : sg->length / 4);
306 }
307 else {
308 at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address);
309 at91_mci_write(host, ATMEL_PDC_RNCR, (data->blksz & 0x3) ? sg->length : sg->length / 4);
310 }
311 }
312
313 pr_debug("pre dma read done\n");
314 }
315
316 /*
317 * Handle after a dma read
318 */
319 static void at91_mci_post_dma_read(struct at91mci_host *host)
320 {
321 struct mmc_command *cmd;
322 struct mmc_data *data;
323
324 pr_debug("post dma read\n");
325
326 cmd = host->cmd;
327 if (!cmd) {
328 pr_debug("no command\n");
329 return;
330 }
331
332 data = cmd->data;
333 if (!data) {
334 pr_debug("no data\n");
335 return;
336 }
337
338 while (host->in_use_index < host->transfer_index) {
339 struct scatterlist *sg;
340
341 pr_debug("finishing index %d\n", host->in_use_index);
342
343 sg = &data->sg[host->in_use_index++];
344
345 pr_debug("Unmapping page %08X\n", sg->dma_address);
346
347 dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);
348
349 if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */
350 unsigned int *buffer;
351 int index;
352
353 /* Swap the contents of the buffer */
354 buffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
355 pr_debug("buffer = %p, length = %d\n", buffer, sg->length);
356
357 for (index = 0; index < (sg->length / 4); index++)
358 buffer[index] = swab32(buffer[index]);
359
360 kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
361 }
362
363 flush_dcache_page(sg_page(sg));
364
365 data->bytes_xfered += sg->length;
366 }
367
368 /* Is there another transfer to trigger? */
369 if (host->transfer_index < data->sg_len)
370 at91_mci_pre_dma_read(host);
371 else {
372 at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_ENDRX);
373 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
374 }
375
376 pr_debug("post dma read done\n");
377 }
378
379 /*
380 * Handle transmitted data
381 */
382 static void at91_mci_handle_transmitted(struct at91mci_host *host)
383 {
384 struct mmc_command *cmd;
385 struct mmc_data *data;
386
387 pr_debug("Handling the transmit\n");
388
389 /* Disable the transfer */
390 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
391
392 /* Now wait for cmd ready */
393 at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE);
394
395 cmd = host->cmd;
396 if (!cmd) return;
397
398 data = cmd->data;
399 if (!data) return;
400
401 if (cmd->data->blocks > 1) {
402 pr_debug("multiple write : wait for BLKE...\n");
403 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
404 } else
405 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
406 }
407
408 /*
409 * Update the bytes transferred count during a write operation
410 */
411 static void at91_mci_update_bytes_xfered(struct at91mci_host *host)
412 {
413 struct mmc_data *data;
414
415 /* always deal with the effective request (and not the current cmd) */
416
417 if (host->request->cmd && host->request->cmd->error != 0)
418 return;
419
420 if (host->request->data) {
421 data = host->request->data;
422 if (data->flags & MMC_DATA_WRITE) {
423 /* card is in IDLE mode now */
424 pr_debug("-> bytes_xfered %d, total_length = %d\n",
425 data->bytes_xfered, host->total_length);
426 data->bytes_xfered = data->blksz * data->blocks;
427 }
428 }
429 }
430
431
432 /* Handle the CMDRDY (command ready) interrupt; returns 1 if the request step is complete, 0 if more interrupts are expected */
433 static int at91_mci_handle_cmdrdy(struct at91mci_host *host)
434 {
435 if (!host->cmd)
436 return 1;
437 else if (!host->cmd->data) {
438 if (host->flags & FL_SENT_STOP) {
439 /* After a multi-block write, we must wait for NOTBUSY */
440 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
441 } else return 1;
442 } else if (host->cmd->data->flags & MMC_DATA_WRITE) {
443 /* After sending a multi-block write command, start the DMA transfer */
444 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE | AT91_MCI_BLKE);
445 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
446 }
447
448 /* command not completed, have to wait */
449 return 0;
450 }
451
452
453 /*
454 * Enable the controller
455 */
456 static void at91_mci_enable(struct at91mci_host *host)
457 {
458 unsigned int mr;
459
460 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
461 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
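/* Program a generous data timeout (1M-cycle multiplier) before any transfers */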
462 at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
463 mr = AT91_MCI_PDCMODE | 0x34a;
464
465 if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
466 mr |= AT91_MCI_RDPROOF | AT91_MCI_WRPROOF;
467
468 at91_mci_write(host, AT91_MCI_MR, mr);
469
470 /* use slot A or B (only one at a time) */
471 at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b);
472 }
473
474 /*
475 * Disable the controller
476 */
477 static void at91_mci_disable(struct at91mci_host *host)
478 {
479 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
480 }
481
482 /*
483 * Send a command
484 */
485 static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
486 {
487 unsigned int cmdr, mr;
488 unsigned int block_length;
489 struct mmc_data *data = cmd->data;
490
491 unsigned int blocks;
492 unsigned int ier = 0;
493
494 host->cmd = cmd;
495
496 /* Needed for leaving busy state before CMD1 */
497 if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
498 pr_debug("Clearing timeout\n");
499 at91_mci_write(host, AT91_MCI_ARGR, 0);
500 at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
501 while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
502 /* spin */
503 pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
504 }
505 }
506
507 cmdr = cmd->opcode;
508
509 if (mmc_resp_type(cmd) == MMC_RSP_NONE)
510 cmdr |= AT91_MCI_RSPTYP_NONE;
511 else {
512 /* if a response is expected then allow maximum response latency */
513 cmdr |= AT91_MCI_MAXLAT;
514 /* set 136 bit response for R2, 48 bit response otherwise */
515 if (mmc_resp_type(cmd) == MMC_RSP_R2)
516 cmdr |= AT91_MCI_RSPTYP_136;
517 else
518 cmdr |= AT91_MCI_RSPTYP_48;
519 }
520
521 if (data) {
522
523 if (cpu_is_at91rm9200() || cpu_is_at91sam9261()) {
524 if (data->blksz & 0x3) {
525 pr_debug("Unsupported block size\n");
526 cmd->error = -EINVAL;
527 mmc_request_done(host->mmc, host->request);
528 return;
529 }
530 if (data->flags & MMC_DATA_STREAM) {
531 pr_debug("Stream commands not supported\n");
532 cmd->error = -EINVAL;
533 mmc_request_done(host->mmc, host->request);
534 return;
535 }
536 }
537
538 block_length = data->blksz;
539 blocks = data->blocks;
540
541 /* always set data start - also set direction flag for read */
542 if (data->flags & MMC_DATA_READ)
543 cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
544 else if (data->flags & MMC_DATA_WRITE)
545 cmdr |= AT91_MCI_TRCMD_START;
546
547 if (data->flags & MMC_DATA_STREAM)
548 cmdr |= AT91_MCI_TRTYP_STREAM;
549 if (data->blocks > 1)
550 cmdr |= AT91_MCI_TRTYP_MULTIPLE;
551 }
552 else {
553 block_length = 0;
554 blocks = 0;
555 }
556
557 if (host->flags & FL_SENT_STOP)
558 cmdr |= AT91_MCI_TRCMD_STOP;
559
560 if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
561 cmdr |= AT91_MCI_OPDCMD;
562
563 /*
564 * Set the arguments and send the command
565 */
566 pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n",
567 cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));
568
569 if (!data) {
570 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS | ATMEL_PDC_RXTDIS);
571 at91_mci_write(host, ATMEL_PDC_RPR, 0);
572 at91_mci_write(host, ATMEL_PDC_RCR, 0);
573 at91_mci_write(host, ATMEL_PDC_RNPR, 0);
574 at91_mci_write(host, ATMEL_PDC_RNCR, 0);
575 at91_mci_write(host, ATMEL_PDC_TPR, 0);
576 at91_mci_write(host, ATMEL_PDC_TCR, 0);
577 at91_mci_write(host, ATMEL_PDC_TNPR, 0);
578 at91_mci_write(host, ATMEL_PDC_TNCR, 0);
579 ier = AT91_MCI_CMDRDY;
580 } else {
581 /* clear the block length, PDC force-byte and PDC mode bits before reprogramming them */
582 mr = at91_mci_read(host, AT91_MCI_MR) & 0x5fff;
583 mr |= (data->blksz & 0x3) ? AT91_MCI_PDCFBYTE : 0;
584 mr |= (block_length << 16);
585 mr |= AT91_MCI_PDCMODE;
586 at91_mci_write(host, AT91_MCI_MR, mr);
587
588 if (!(cpu_is_at91rm9200() || cpu_is_at91sam9261()))
589 at91_mci_write(host, AT91_MCI_BLKR,
590 AT91_MCI_BLKR_BCNT(blocks) |
591 AT91_MCI_BLKR_BLKLEN(block_length));
592
593 /*
594 * Disable the PDC controller
595 */
596 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
597
598 if (cmdr & AT91_MCI_TRCMD_START) {
599 data->bytes_xfered = 0;
600 host->transfer_index = 0;
601 host->in_use_index = 0;
602 if (cmdr & AT91_MCI_TRDIR) {
603 /*
604 * Handle a read
605 */
606 host->buffer = NULL;
607 host->total_length = 0;
608
609 at91_mci_pre_dma_read(host);
610 ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
611 }
612 else {
613 /*
614 * Handle a write
615 */
616 host->total_length = block_length * blocks;
617 /*
618 * AT91SAM926[0/3] Data Write Operation and
619 * number of bytes erratum
620 */
621 if (cpu_is_at91sam9260 () || cpu_is_at91sam9263())
622 if (host->total_length < 12)
623 host->total_length = 12;
624
625 host->buffer = kmalloc(host->total_length, GFP_KERNEL);
626 if (!host->buffer) {
627 pr_debug("Can't alloc tx buffer\n");
628 cmd->error = -ENOMEM;
629 mmc_request_done(host->mmc, host->request);
630 return;
631 }
632
633 at91_mci_sg_to_dma(host, data);
634
635 host->physical_address = dma_map_single(NULL,
636 host->buffer, host->total_length,
637 DMA_TO_DEVICE);
638
639 pr_debug("Transmitting %d bytes\n", host->total_length);
640
641 at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
642 at91_mci_write(host, ATMEL_PDC_TCR, (data->blksz & 0x3) ?
643 host->total_length : host->total_length / 4);
644
645 ier = AT91_MCI_CMDRDY;
646 }
647 }
648 }
649
650 /*
651 * Send the command and then enable the PDC - not the other way round as
652 * the data sheet says
653 */
654
655 at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
656 at91_mci_write(host, AT91_MCI_CMDR, cmdr);
657
658 if (cmdr & AT91_MCI_TRCMD_START) {
659 if (cmdr & AT91_MCI_TRDIR)
660 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
661 }
662
663 /* Enable selected interrupts */
664 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier);
665 }
666
667 /*
668 * Process the next step in the request
669 */
670 static void at91_mci_process_next(struct at91mci_host *host)
671 {
672 if (!(host->flags & FL_SENT_COMMAND)) {
673 host->flags |= FL_SENT_COMMAND;
674 at91_mci_send_command(host, host->request->cmd);
675 }
676 else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
677 host->flags |= FL_SENT_STOP;
678 at91_mci_send_command(host, host->request->stop);
679 } else {
680 del_timer(&host->timer);
681 /* the at91rm9200 mci controller hangs after some transfers,
682 * and the workaround is to reset it after each transfer.
683 */
684 if (cpu_is_at91rm9200())
685 at91_reset_host(host);
686 mmc_request_done(host->mmc, host->request);
687 }
688 }
689
690 /*
691 * Handle a command that has been completed
692 */
693 static void at91_mci_completed_command(struct at91mci_host *host, unsigned int status)
694 {
695 struct mmc_command *cmd = host->cmd;
696 struct mmc_data *data = cmd->data;
697
698 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));
699
700 cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
701 cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
702 cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
703 cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));
704
705 if (host->buffer) {
706 dma_unmap_single(NULL,
707 host->physical_address, host->total_length,
708 DMA_TO_DEVICE);
709 kfree(host->buffer);
710 host->buffer = NULL;
711 }
712
713 pr_debug("Status = %08X/%08x [%08X %08X %08X %08X]\n",
714 status, at91_mci_read(host, AT91_MCI_SR),
715 cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
716
717 if (status & AT91_MCI_ERRORS) {
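/* A response CRC error is expected for response types that carry no CRC
 * (e.g. R3), so only treat it as an error when a CRC is actually expected. */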
718 if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) {
719 cmd->error = 0;
720 }
721 else {
722 if (status & (AT91_MCI_DTOE | AT91_MCI_DCRCE)) {
723 if (data) {
724 if (status & AT91_MCI_DTOE)
725 data->error = -ETIMEDOUT;
726 else if (status & AT91_MCI_DCRCE)
727 data->error = -EILSEQ;
728 }
729 } else {
730 if (status & AT91_MCI_RTOE)
731 cmd->error = -ETIMEDOUT;
732 else if (status & AT91_MCI_RCRCE)
733 cmd->error = -EILSEQ;
734 else
735 cmd->error = -EIO;
736 }
737
738 pr_debug("Error detected and set to %d/%d (cmd = %d, retries = %d)\n",
739 cmd->error, data ? data->error : 0,
740 cmd->opcode, cmd->retries);
741 }
742 }
743 else
744 cmd->error = 0;
745
746 at91_mci_process_next(host);
747 }
748
749 /*
750 * Handle an MMC request
751 */
752 static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
753 {
754 struct at91mci_host *host = mmc_priv(mmc);
755 host->request = mrq;
756 host->flags = 0;
757
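/* Arm a one-second watchdog; at91_timeout_timer() will reset the host and
 * fail the request if it expires before the transfer completes. */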
758 mod_timer(&host->timer, jiffies + HZ);
759
760 at91_mci_process_next(host);
761 }
762
763 /*
764 * Set the IOS
765 */
766 static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
767 {
768 int clkdiv;
769 struct at91mci_host *host = mmc_priv(mmc);
770 unsigned long at91_master_clock = clk_get_rate(host->mci_clk);
771
772 host->bus_mode = ios->bus_mode;
773
774 if (ios->clock == 0) {
775 /* Disable the MCI controller */
776 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS);
777 clkdiv = 0;
778 }
779 else {
780 /* Enable the MCI controller */
781 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
782
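/* The MCI clock is MCK / (2 * (CLKDIV + 1)); pick the divider so that the
 * resulting clock does not exceed the requested rate (round down when the
 * division is not exact). */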
783 if ((at91_master_clock % (ios->clock * 2)) == 0)
784 clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
785 else
786 clkdiv = (at91_master_clock / ios->clock) / 2;
787
788 pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
789 at91_master_clock / (2 * (clkdiv + 1)));
790 }
791 if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
792 pr_debug("MMC: Setting controller bus width to 4\n");
793 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
794 }
795 else {
796 pr_debug("MMC: Setting controller bus width to 1\n");
797 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
798 }
799
800 /* Set the clock divider */
801 at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);
802
803 /* maybe switch power to the card */
804 if (host->board->vcc_pin) {
805 switch (ios->power_mode) {
806 case MMC_POWER_OFF:
807 gpio_set_value(host->board->vcc_pin, 0);
808 break;
809 case MMC_POWER_UP:
810 gpio_set_value(host->board->vcc_pin, 1);
811 break;
812 case MMC_POWER_ON:
813 break;
814 default:
815 WARN_ON(1);
816 }
817 }
818 }
819
820 /*
821 * Handle an interrupt
822 */
823 static irqreturn_t at91_mci_irq(int irq, void *devid)
824 {
825 struct at91mci_host *host = devid;
826 int completed = 0;
827 unsigned int int_status, int_mask;
828
829 int_status = at91_mci_read(host, AT91_MCI_SR);
830 int_mask = at91_mci_read(host, AT91_MCI_IMR);
831
832 pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask,
833 int_status & int_mask);
834
835 int_status = int_status & int_mask;
836
837 if (int_status & AT91_MCI_ERRORS) {
838 completed = 1;
839
840 if (int_status & AT91_MCI_UNRE)
841 pr_debug("MMC: Underrun error\n");
842 if (int_status & AT91_MCI_OVRE)
843 pr_debug("MMC: Overrun error\n");
844 if (int_status & AT91_MCI_DTOE)
845 pr_debug("MMC: Data timeout\n");
846 if (int_status & AT91_MCI_DCRCE)
847 pr_debug("MMC: CRC error in data\n");
848 if (int_status & AT91_MCI_RTOE)
849 pr_debug("MMC: Response timeout\n");
850 if (int_status & AT91_MCI_RENDE)
851 pr_debug("MMC: Response end bit error\n");
852 if (int_status & AT91_MCI_RCRCE)
853 pr_debug("MMC: Response CRC error\n");
854 if (int_status & AT91_MCI_RDIRE)
855 pr_debug("MMC: Response direction error\n");
856 if (int_status & AT91_MCI_RINDE)
857 pr_debug("MMC: Response index error\n");
858 } else {
859 /* Only continue processing if no errors */
860
861 if (int_status & AT91_MCI_TXBUFE) {
862 pr_debug("TX buffer empty\n");
863 at91_mci_handle_transmitted(host);
864 }
865
866 if (int_status & AT91_MCI_ENDRX) {
867 pr_debug("ENDRX\n");
868 at91_mci_post_dma_read(host);
869 }
870
871 if (int_status & AT91_MCI_RXBUFF) {
872 pr_debug("RX buffer full\n");
873 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
874 at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_RXBUFF | AT91_MCI_ENDRX);
875 completed = 1;
876 }
877
878 if (int_status & AT91_MCI_ENDTX)
879 pr_debug("Transmit has ended\n");
880
881 if (int_status & AT91_MCI_NOTBUSY) {
882 pr_debug("Card is ready\n");
883 at91_mci_update_bytes_xfered(host);
884 completed = 1;
885 }
886
887 if (int_status & AT91_MCI_DTIP)
888 pr_debug("Data transfer in progress\n");
889
890 if (int_status & AT91_MCI_BLKE) {
891 pr_debug("Block transfer has ended\n");
892 if (host->request->data && host->request->data->blocks > 1) {
893 /* multi block write : complete multi write
894 * command and send stop */
895 completed = 1;
896 } else {
897 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
898 }
899 }
900
901 if (int_status & AT91_MCI_SDIOIRQA)
902 mmc_signal_sdio_irq(host->mmc);
903
904 if (int_status & AT91_MCI_SDIOIRQB)
905 mmc_signal_sdio_irq(host->mmc);
906
907 if (int_status & AT91_MCI_TXRDY)
908 pr_debug("Ready to transmit\n");
909
910 if (int_status & AT91_MCI_RXRDY)
911 pr_debug("Ready to receive\n");
912
913 if (int_status & AT91_MCI_CMDRDY) {
914 pr_debug("Command ready\n");
915 completed = at91_mci_handle_cmdrdy(host);
916 }
917 }
918
919 if (completed) {
920 pr_debug("Completed command\n");
921 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));
922 at91_mci_completed_command(host, int_status);
923 } else
924 at91_mci_write(host, AT91_MCI_IDR, int_status & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));
925
926 return IRQ_HANDLED;
927 }
928
929 static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
930 {
931 struct at91mci_host *host = _host;
932 int present = !gpio_get_value(irq_to_gpio(irq));
933
934 /*
935 * we expect this irq on both insert and remove,
936 * and use a short delay to debounce.
937 */
938 if (present != host->present) {
939 host->present = present;
940 pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
941 present ? "insert" : "remove");
942 if (!present) {
943 pr_debug("****** Resetting SD-card bus width ******\n");
944 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
945 }
946 mmc_detect_change(host->mmc, msecs_to_jiffies(100));
947 }
948 return IRQ_HANDLED;
949 }
950
951 static int at91_mci_get_ro(struct mmc_host *mmc)
952 {
953 struct at91mci_host *host = mmc_priv(mmc);
954
955 if (host->board->wp_pin)
956 return !!gpio_get_value(host->board->wp_pin);
957 /*
958 * Board doesn't support read only detection; let the mmc core
959 * decide what to do.
960 */
961 return -ENOSYS;
962 }
963
964 static void at91_mci_enable_sdio_irq(struct mmc_host *mmc, int enable)
965 {
966 struct at91mci_host *host = mmc_priv(mmc);
967
968 pr_debug("%s: sdio_irq %c : %s\n", mmc_hostname(host->mmc),
969 host->board->slot_b ? 'B':'A', enable ? "enable" : "disable");
970 at91_mci_write(host, enable ? AT91_MCI_IER : AT91_MCI_IDR,
971 host->board->slot_b ? AT91_MCI_SDIOIRQB : AT91_MCI_SDIOIRQA);
972
973 }
974
975 static const struct mmc_host_ops at91_mci_ops = {
976 .request = at91_mci_request,
977 .set_ios = at91_mci_set_ios,
978 .get_ro = at91_mci_get_ro,
979 .enable_sdio_irq = at91_mci_enable_sdio_irq,
980 };
981
982 /*
983 * Probe for the device
984 */
985 static int __init at91_mci_probe(struct platform_device *pdev)
986 {
987 struct mmc_host *mmc;
988 struct at91mci_host *host;
989 struct resource *res;
990 int ret;
991
992 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
993 if (!res)
994 return -ENXIO;
995
996 if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
997 return -EBUSY;
998
999 mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
1000 if (!mmc) {
1001 ret = -ENOMEM;
1002 dev_dbg(&pdev->dev, "couldn't allocate mmc host\n");
1003 goto fail6;
1004 }
1005
1006 mmc->ops = &at91_mci_ops;
1007 mmc->f_min = 375000;
1008 mmc->f_max = 25000000;
1009 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1010 mmc->caps = MMC_CAP_SDIO_IRQ;
1011
1012 mmc->max_blk_size = 4095;
1013 mmc->max_blk_count = mmc->max_req_size;
1014
1015 host = mmc_priv(mmc);
1016 host->mmc = mmc;
1017 host->buffer = NULL;
1018 host->bus_mode = 0;
1019 host->board = pdev->dev.platform_data;
1020 if (host->board->wire4) {
1021 if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
1022 mmc->caps |= MMC_CAP_4_BIT_DATA;
1023 else
1024 dev_warn(&pdev->dev, "4 wire bus mode not supported"
1025 " - using 1 wire\n");
1026 }
1027
1028 /*
1029 * Reserve GPIOs ... board init code makes sure these pins are set
1030 * up as GPIOs with the right direction (input, except for vcc)
1031 */
1032 if (host->board->det_pin) {
1033 ret = gpio_request(host->board->det_pin, "mmc_detect");
1034 if (ret < 0) {
1035 dev_dbg(&pdev->dev, "couldn't claim card detect pin\n");
1036 goto fail5;
1037 }
1038 }
1039 if (host->board->wp_pin) {
1040 ret = gpio_request(host->board->wp_pin, "mmc_wp");
1041 if (ret < 0) {
1042 dev_dbg(&pdev->dev, "couldn't claim wp sense pin\n");
1043 goto fail4;
1044 }
1045 }
1046 if (host->board->vcc_pin) {
1047 ret = gpio_request(host->board->vcc_pin, "mmc_vcc");
1048 if (ret < 0) {
1049 dev_dbg(&pdev->dev, "couldn't claim vcc switch pin\n");
1050 goto fail3;
1051 }
1052 }
1053
1054 /*
1055 * Get Clock
1056 */
1057 host->mci_clk = clk_get(&pdev->dev, "mci_clk");
1058 if (IS_ERR(host->mci_clk)) {
1059 ret = -ENODEV;
1060 dev_dbg(&pdev->dev, "no mci_clk?\n");
1061 goto fail2;
1062 }
1063
1064 /*
1065 * Map I/O region
1066 */
1067 host->baseaddr = ioremap(res->start, res->end - res->start + 1);
1068 if (!host->baseaddr) {
1069 ret = -ENOMEM;
1070 goto fail1;
1071 }
1072
1073 /*
1074 * Reset hardware
1075 */
1076 clk_enable(host->mci_clk); /* Enable the peripheral clock */
1077 at91_mci_disable(host);
1078 at91_mci_enable(host);
1079
1080 /*
1081 * Allocate the MCI interrupt
1082 */
1083 host->irq = platform_get_irq(pdev, 0);
1084 ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED,
1085 mmc_hostname(mmc), host);
1086 if (ret) {
1087 dev_dbg(&pdev->dev, "request MCI interrupt failed\n");
1088 goto fail0;
1089 }
1090
1091 setup_timer(&host->timer, at91_timeout_timer, (unsigned long)host);
1092
1093 platform_set_drvdata(pdev, mmc);
1094
1095 /*
1096 * Add host to MMC layer
1097 */
1098 if (host->board->det_pin) {
1099 host->present = !gpio_get_value(host->board->det_pin);
1100 }
1101 else
1102 host->present = -1;
1103
1104 mmc_add_host(mmc);
1105
1106 /*
1107 * monitor card insertion/removal if we can
1108 */
1109 if (host->board->det_pin) {
1110 ret = request_irq(gpio_to_irq(host->board->det_pin),
1111 at91_mmc_det_irq, 0, mmc_hostname(mmc), host);
1112 if (ret)
1113 dev_warn(&pdev->dev, "request MMC detect irq failed\n");
1114 else
1115 device_init_wakeup(&pdev->dev, 1);
1116 }
1117
1118 pr_debug("Added MCI driver\n");
1119
1120 return 0;
1121
1122 fail0:
1123 clk_disable(host->mci_clk);
1124 iounmap(host->baseaddr);
1125 fail1:
1126 clk_put(host->mci_clk);
1127 fail2:
1128 if (host->board->vcc_pin)
1129 gpio_free(host->board->vcc_pin);
1130 fail3:
1131 if (host->board->wp_pin)
1132 gpio_free(host->board->wp_pin);
1133 fail4:
1134 if (host->board->det_pin)
1135 gpio_free(host->board->det_pin);
1136 fail5:
1137 mmc_free_host(mmc);
1138 fail6:
1139 release_mem_region(res->start, res->end - res->start + 1);
1140 dev_err(&pdev->dev, "probe failed, err %d\n", ret);
1141 return ret;
1142 }
1143
1144 /*
1145 * Remove a device
1146 */
1147 static int __exit at91_mci_remove(struct platform_device *pdev)
1148 {
1149 struct mmc_host *mmc = platform_get_drvdata(pdev);
1150 struct at91mci_host *host;
1151 struct resource *res;
1152
1153 if (!mmc)
1154 return -1;
1155
1156 host = mmc_priv(mmc);
1157
1158 if (host->board->det_pin) {
1159 if (device_can_wakeup(&pdev->dev))
1160 free_irq(gpio_to_irq(host->board->det_pin), host);
1161 device_init_wakeup(&pdev->dev, 0);
1162 gpio_free(host->board->det_pin);
1163 }
1164
1165 at91_mci_disable(host);
1166 del_timer_sync(&host->timer);
1167 mmc_remove_host(mmc);
1168 free_irq(host->irq, host);
1169
1170 clk_disable(host->mci_clk); /* Disable the peripheral clock */
1171 clk_put(host->mci_clk);
1172
1173 if (host->board->vcc_pin)
1174 gpio_free(host->board->vcc_pin);
1175 if (host->board->wp_pin)
1176 gpio_free(host->board->wp_pin);
1177
1178 iounmap(host->baseaddr);
1179 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1180 release_mem_region(res->start, res->end - res->start + 1);
1181
1182 mmc_free_host(mmc);
1183 platform_set_drvdata(pdev, NULL);
1184 pr_debug("MCI Removed\n");
1185
1186 return 0;
1187 }
1188
1189 #ifdef CONFIG_PM
1190 static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
1191 {
1192 struct mmc_host *mmc = platform_get_drvdata(pdev);
1193 struct at91mci_host *host = mmc_priv(mmc);
1194 int ret = 0;
1195
1196 if (host->board->det_pin && device_may_wakeup(&pdev->dev))
1197 enable_irq_wake(host->board->det_pin);
1198
1199 if (mmc)
1200 ret = mmc_suspend_host(mmc, state);
1201
1202 return ret;
1203 }
1204
1205 static int at91_mci_resume(struct platform_device *pdev)
1206 {
1207 struct mmc_host *mmc = platform_get_drvdata(pdev);
1208 struct at91mci_host *host = mmc_priv(mmc);
1209 int ret = 0;
1210
1211 if (host->board->det_pin && device_may_wakeup(&pdev->dev))
1212 disable_irq_wake(host->board->det_pin);
1213
1214 if (mmc)
1215 ret = mmc_resume_host(mmc);
1216
1217 return ret;
1218 }
1219 #else
1220 #define at91_mci_suspend NULL
1221 #define at91_mci_resume NULL
1222 #endif
1223
1224 static struct platform_driver at91_mci_driver = {
1225 .remove = __exit_p(at91_mci_remove),
1226 .suspend = at91_mci_suspend,
1227 .resume = at91_mci_resume,
1228 .driver = {
1229 .name = DRIVER_NAME,
1230 .owner = THIS_MODULE,
1231 },
1232 };
1233
1234 static int __init at91_mci_init(void)
1235 {
1236 return platform_driver_probe(&at91_mci_driver, at91_mci_probe);
1237 }
1238
1239 static void __exit at91_mci_exit(void)
1240 {
1241 platform_driver_unregister(&at91_mci_driver);
1242 }
1243
1244 module_init(at91_mci_init);
1245 module_exit(at91_mci_exit);
1246
1247 MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
1248 MODULE_AUTHOR("Nick Randell");
1249 MODULE_LICENSE("GPL");
1250 MODULE_ALIAS("platform:at91_mci");