AT91 MMC 4 : Interrupt handler cleanup
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / mmc / at91_mci.c
CommitLineData
65dbf343
AV
1/*
2 * linux/drivers/mmc/at91_mci.c - ATMEL AT91RM9200 MCI Driver
3 *
4 * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
5 *
6 * Copyright (C) 2006 Malcolm Noyes
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13/*
14 This is the AT91RM9200 MCI driver that has been tested with both MMC cards
15 and SD-cards. Boards that support write protect are now supported.
16 The CCAT91SBC001 board does not support SD cards.
17
18 The three entry points are at91_mci_request, at91_mci_set_ios
19 and at91_mci_get_ro.
20
21 SET IOS
22 This configures the device to put it into the correct mode and clock speed
23 required.
24
25 MCI REQUEST
26 MCI request processes the commands sent in the mmc_request structure. This
27 can consist of a processing command and a stop command in the case of
28 multiple block transfers.
29
30 There are three main types of request, commands, reads and writes.
31
32 Commands are straight forward. The command is submitted to the controller and
33 the request function returns. When the controller generates an interrupt to indicate
34 the command is finished, the response to the command are read and the mmc_request_done
35 function called to end the request.
36
37 Reads and writes work in a similar manner to normal commands but involve the PDC (DMA)
38 controller to manage the transfers.
39
40 A read is done from the controller directly to the scatterlist passed in from the request.
41 Due to a bug in the controller, when a read is completed, all the words are byte
42 swapped in the scatterlist buffers.
43
44 The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY
45
46 A write is slightly different in that the bytes to write are read from the scatterlist
47 into a dma memory buffer (this is in case the source buffer should be read only). The
48 entire write buffer is then done from this single dma memory buffer.
49
50 The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY
51
52 GET RO
53 Gets the status of the write protect pin, if available.
54*/
55
65dbf343
AV
56#include <linux/module.h>
57#include <linux/moduleparam.h>
58#include <linux/init.h>
59#include <linux/ioport.h>
60#include <linux/platform_device.h>
61#include <linux/interrupt.h>
62#include <linux/blkdev.h>
63#include <linux/delay.h>
64#include <linux/err.h>
65#include <linux/dma-mapping.h>
66#include <linux/clk.h>
67
68#include <linux/mmc/host.h>
69#include <linux/mmc/protocol.h>
70
71#include <asm/io.h>
72#include <asm/irq.h>
73#include <asm/mach/mmc.h>
74#include <asm/arch/board.h>
75#include <asm/arch/gpio.h>
55d8baee
AV
76#include <asm/arch/at91_mci.h>
77#include <asm/arch/at91_pdc.h>
65dbf343
AV
78
#define DRIVER_NAME "at91_mci"

/* 4-wire (4-bit data bus) support is compiled out by default */
#undef SUPPORT_4WIRE

/* host->flags bits: track which command of the current request was sent */
#define FL_SENT_COMMAND	(1 << 0)
#define FL_SENT_STOP	(1 << 1)

/* Every error bit in the MCI status register; OR-ed into IER so any
 * error interrupt terminates the command in progress. */
#define AT91_MCI_ERRORS	(AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE \
		| AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE \
		| AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)

/* MMIO accessors, relative to the ioremap()ed controller base address */
#define at91_mci_read(host, reg)	__raw_readl((host)->baseaddr + (reg))
#define at91_mci_write(host, reg, val)	__raw_writel((val), (host)->baseaddr + (reg))
65dbf343 92
65dbf343
AV
93
/*
 * Low level type for this driver: per-controller state, stored as the
 * private area of the mmc_host.
 */
struct at91mci_host
{
	struct mmc_host *mmc;
	struct mmc_command *cmd;	/* command currently in flight */
	struct mmc_request *request;	/* request being processed */

	void __iomem *baseaddr;		/* mapped controller registers */
	int irq;			/* MCI interrupt line */

	struct at91_mmc_data *board;	/* board data: GPIO pins, wire4 flag */
	int present;			/* card-detect state; -1 if no detect pin */

	struct clk *mci_clk;		/* peripheral clock */

	/*
	 * Flag indicating when the command has been sent. This is used to
	 * work out whether or not to send the stop
	 */
	unsigned int flags;
	/* flag for current bus settings */
	u32 bus_mode;

	/* DMA buffer used for transmitting */
	unsigned int* buffer;
	dma_addr_t physical_address;
	unsigned int total_length;

	/* Latest in the scatterlist that has been enabled for transfer, but not freed */
	int in_use_index;

	/* Latest in the scatterlist that has been enabled for transfer */
	int transfer_index;
};
130
/*
 * Copy from sg to a dma block - used for transfers
 *
 * Each 32-bit word is swab32()ed on the way into the bounce buffer;
 * per the file header, the controller byte-swaps transferred words.
 */
static inline void at91mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
{
	unsigned int len, i, size;
	unsigned *dmabuf = host->buffer;

	size = host->total_length;
	len = data->sg_len;

	/*
	 * Just loop through all entries. Size might not
	 * be the entire list though so make sure that
	 * we do not transfer too much.
	 */
	for (i = 0; i < len; i++) {
		struct scatterlist *sg;
		int amount;
		int index;
		unsigned int *sgbuffer;

		sg = &data->sg[i];

		sgbuffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
		amount = min(size, sg->length);
		size -= amount;
		amount /= 4;	/* convert bytes to whole 32-bit words */

		for (index = 0; index < amount; index++)
			*dmabuf++ = swab32(sgbuffer[index]);

		kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);

		if (size == 0)
			break;
	}

	/*
	 * Check that we didn't get a request to transfer
	 * more data than can fit into the SG list.
	 */
	BUG_ON(size != 0);
}
175
/*
 * Prepare a dma read
 *
 * Programs the PDC's "current" and "next" receive pointer/counter pairs
 * from the request's scatterlist, two entries at a time, advancing
 * host->transfer_index for each entry handed to the hardware.
 */
static void at91mci_pre_dma_read(struct at91mci_host *host)
{
	int i;
	struct scatterlist *sg;
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("pre dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	/* i == 0 fills the current PDC buffer, i == 1 the "next" buffer */
	for (i = 0; i < 2; i++) {
		/* nothing left to transfer */
		if (host->transfer_index >= data->sg_len) {
			pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
			break;
		}

		/* Check to see if this needs filling */
		if (i == 0) {
			if (at91_mci_read(host, AT91_PDC_RCR) != 0) {
				pr_debug("Transfer active in current\n");
				continue;
			}
		}
		else {
			if (at91_mci_read(host, AT91_PDC_RNCR) != 0) {
				pr_debug("Transfer active in next\n");
				continue;
			}
		}

		/* Setup the next transfer */
		pr_debug("Using transfer index %d\n", host->transfer_index);

		sg = &data->sg[host->transfer_index++];
		pr_debug("sg = %p\n", sg);

		/* Map directly into the caller's page; unmapped in post_dma_read */
		sg->dma_address = dma_map_page(NULL, sg->page, sg->offset, sg->length, DMA_FROM_DEVICE);

		pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);

		if (i == 0) {
			at91_mci_write(host, AT91_PDC_RPR, sg->dma_address);
			at91_mci_write(host, AT91_PDC_RCR, sg->length / 4);	/* counter is in words */
		}
		else {
			at91_mci_write(host, AT91_PDC_RNPR, sg->dma_address);
			at91_mci_write(host, AT91_PDC_RNCR, sg->length / 4);
		}
	}

	pr_debug("pre dma read done\n");
}
243
/*
 * Handle after a dma read
 *
 * For each scatterlist entry the PDC has finished with: unmap it,
 * byte-swap the received words in place (see the file header on the
 * controller's swapping behaviour), account the bytes, and flush the
 * page. Then either queue the next PDC transfer or finish the read.
 */
static void at91mci_post_dma_read(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("post dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	while (host->in_use_index < host->transfer_index) {
		unsigned int *buffer;
		int index;
		int len;

		struct scatterlist *sg;

		pr_debug("finishing index %d\n", host->in_use_index);

		sg = &data->sg[host->in_use_index++];

		pr_debug("Unmapping page %08X\n", sg->dma_address);

		dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);

		/* Swap the contents of the buffer */
		buffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
		pr_debug("buffer = %p, length = %d\n", buffer, sg->length);

		data->bytes_xfered += sg->length;

		len = sg->length / 4;

		for (index = 0; index < len; index++) {
			buffer[index] = swab32(buffer[index]);
		}
		kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
		flush_dcache_page(sg->page);
	}

	/* Is there another transfer to trigger? */
	if (host->transfer_index < data->sg_len)
		at91mci_pre_dma_read(host);
	else {
		/* All entries queued: wait for RXBUFF, stop the PDC */
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
		at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS);
	}

	pr_debug("post dma read done\n");
}
306
/*
 * Handle transmitted data (TXBUFE): the PDC has drained the bounce
 * buffer. Stop the PDC, switch interrupt interest from TXBUFE to
 * NOTBUSY (card still writing), and account the transferred bytes.
 */
static void at91_mci_handle_transmitted(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("Handling the transmit\n");

	/* Disable the transfer */
	at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS);

	/* Now wait for cmd ready */
	at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE);
	at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);

	cmd = host->cmd;
	if (!cmd) return;

	data = cmd->data;
	if (!data) return;

	data->bytes_xfered = host->total_length;
}
332
/*
 * Enable the controller: power it on, mask all interrupts, set the
 * data timeout and an initial mode register value.
 */
static void at91_mci_enable(struct at91mci_host *host)
{
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
	/* Mask everything until a command is issued */
	at91_mci_write(host, AT91_MCI_IDR, 0xFFFFFFFF);
	at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
	/* NOTE(review): 0x834A is a magic MR value — verify the field
	 * encoding against the AT91RM9200 datasheet */
	at91_mci_write(host, AT91_MCI_MR, 0x834A);
	at91_mci_write(host, AT91_MCI_SDCR, 0x0);
}
344
/*
 * Disable the controller and assert its software reset.
 */
static void at91_mci_disable(struct at91mci_host *host)
{
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
}
352
/*
 * Send a command
 * return the interrupts to enable
 *
 * Builds the CMDR word from the mmc_command, configures the PDC for a
 * read (scatterlist mapped directly) or a write (single coherent bounce
 * buffer), then writes ARGR/CMDR. Note the deliberate ordering: command
 * first, PDC enable after (see comment below).
 */
static unsigned int at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
{
	unsigned int cmdr, mr;
	unsigned int block_length;
	struct mmc_data *data = cmd->data;

	unsigned int blocks;
	unsigned int ier = 0;

	host->cmd = cmd;

	/* Not sure if this is needed */
#if 0
	if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
		pr_debug("Clearing timeout\n");
		at91_mci_write(host, AT91_MCI_ARGR, 0);
		at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
		while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
			/* spin */
			pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
		}
	}
#endif
	cmdr = cmd->opcode;

	if (mmc_resp_type(cmd) == MMC_RSP_NONE)
		cmdr |= AT91_MCI_RSPTYP_NONE;
	else {
		/* if a response is expected then allow maximum response latency */
		cmdr |= AT91_MCI_MAXLAT;
		/* set 136 bit response for R2, 48 bit response otherwise */
		if (mmc_resp_type(cmd) == MMC_RSP_R2)
			cmdr |= AT91_MCI_RSPTYP_136;
		else
			cmdr |= AT91_MCI_RSPTYP_48;
	}

	if (data) {
		block_length = data->blksz;
		blocks = data->blocks;

		/* always set data start - also set direction flag for read */
		if (data->flags & MMC_DATA_READ)
			cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
		else if (data->flags & MMC_DATA_WRITE)
			cmdr |= AT91_MCI_TRCMD_START;

		if (data->flags & MMC_DATA_STREAM)
			cmdr |= AT91_MCI_TRTYP_STREAM;
		if (data->flags & MMC_DATA_MULTI)
			cmdr |= AT91_MCI_TRTYP_MULTIPLE;
	}
	else {
		block_length = 0;
		blocks = 0;
	}

	if (cmd->opcode == MMC_STOP_TRANSMISSION)
		cmdr |= AT91_MCI_TRCMD_STOP;

	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdr |= AT91_MCI_OPDCMD;

	/*
	 * Set the arguments and send the command
	 */
	pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08lX)\n",
		cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));

	if (!data) {
		/* No data phase: quiesce the PDC and clear all its registers
		 * before issuing, then wait only for CMDRDY */
		at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_TXTDIS | AT91_PDC_RXTDIS);
		at91_mci_write(host, AT91_PDC_RPR, 0);
		at91_mci_write(host, AT91_PDC_RCR, 0);
		at91_mci_write(host, AT91_PDC_RNPR, 0);
		at91_mci_write(host, AT91_PDC_RNCR, 0);
		at91_mci_write(host, AT91_PDC_TPR, 0);
		at91_mci_write(host, AT91_PDC_TCR, 0);
		at91_mci_write(host, AT91_PDC_TNPR, 0);
		at91_mci_write(host, AT91_PDC_TNCR, 0);

		at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
		at91_mci_write(host, AT91_MCI_CMDR, cmdr);
		return AT91_MCI_CMDRDY;
	}

	mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff;	/* zero block length and PDC mode */
	at91_mci_write(host, AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE);

	/*
	 * Disable the PDC controller
	 */
	at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS);

	if (cmdr & AT91_MCI_TRCMD_START) {
		data->bytes_xfered = 0;
		host->transfer_index = 0;
		host->in_use_index = 0;
		if (cmdr & AT91_MCI_TRDIR) {
			/*
			 * Handle a read
			 */
			host->buffer = NULL;
			host->total_length = 0;

			at91mci_pre_dma_read(host);
			ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
		}
		else {
			/*
			 * Handle a write
			 */
			host->total_length = block_length * blocks;
			/* NOTE(review): dma_alloc_coherent() result is not
			 * checked — a failed allocation would oops in
			 * at91mci_sg_to_dma() */
			host->buffer = dma_alloc_coherent(NULL,
					host->total_length,
					&host->physical_address, GFP_KERNEL);

			at91mci_sg_to_dma(host, data);

			pr_debug("Transmitting %d bytes\n", host->total_length);

			at91_mci_write(host, AT91_PDC_TPR, host->physical_address);
			at91_mci_write(host, AT91_PDC_TCR, host->total_length / 4);
			ier = AT91_MCI_TXBUFE;
		}
	}

	/*
	 * Send the command and then enable the PDC - not the other way round as
	 * the data sheet says
	 */

	at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
	at91_mci_write(host, AT91_MCI_CMDR, cmdr);

	if (cmdr & AT91_MCI_TRCMD_START) {
		if (cmdr & AT91_MCI_TRDIR)
			at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_RXTEN);
		else
			at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_TXTEN);
	}
	return ier;
}
499
500/*
501 * Wait for a command to complete
502 */
503static void at91mci_process_command(struct at91mci_host *host, struct mmc_command *cmd)
504{
505 unsigned int ier;
506
507 ier = at91_mci_send_command(host, cmd);
508
b44fb7a0 509 pr_debug("setting ier to %08X\n", ier);
65dbf343
AV
510
511 /* Stop on errors or the required value */
df05a303 512 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier);
65dbf343
AV
513}
514
/*
 * Process the next step in the request
 *
 * Small state machine driven by host->flags: first send the main
 * command, then the stop command (if the request has one), and finally
 * report completion to the MMC core. Re-entered from the completed-
 * command path after each step.
 */
static void at91mci_process_next(struct at91mci_host *host)
{
	if (!(host->flags & FL_SENT_COMMAND)) {
		host->flags |= FL_SENT_COMMAND;
		at91mci_process_command(host, host->request->cmd);
	}
	else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
		host->flags |= FL_SENT_STOP;
		at91mci_process_command(host, host->request->stop);
	}
	else
		mmc_request_done(host->mmc, host->request);
}
531
/*
 * Handle a command that has been completed: read back the response,
 * free the write bounce buffer, map status-register error bits onto
 * MMC error codes, then advance the request state machine.
 */
static void at91mci_completed_command(struct at91mci_host *host)
{
	struct mmc_command *cmd = host->cmd;
	unsigned int status;

	/* Mask everything; re-enabled when the next command is issued */
	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);

	cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
	cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
	cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
	cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));

	/* Release the write bounce buffer, if one was allocated */
	if (host->buffer) {
		dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address);
		host->buffer = NULL;
	}

	status = at91_mci_read(host, AT91_MCI_SR);

	pr_debug("Status = %08X [%08X %08X %08X %08X]\n",
		 status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);

	if (status & (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE |
			AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE |
			AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)) {
		/* A CRC error on the *_OP_COND commands is deliberately
		 * ignored — presumably because their responses carry no
		 * valid CRC field (R3); verify against the MMC/SD specs */
		if ((status & AT91_MCI_RCRCE) &&
		    ((cmd->opcode == MMC_SEND_OP_COND) || (cmd->opcode == SD_APP_OP_COND))) {
			cmd->error = MMC_ERR_NONE;
		}
		else {
			if (status & (AT91_MCI_RTOE | AT91_MCI_DTOE))
				cmd->error = MMC_ERR_TIMEOUT;
			else if (status & (AT91_MCI_RCRCE | AT91_MCI_DCRCE))
				cmd->error = MMC_ERR_BADCRC;
			else if (status & (AT91_MCI_OVRE | AT91_MCI_UNRE))
				cmd->error = MMC_ERR_FIFO;
			else
				cmd->error = MMC_ERR_FAILED;

			pr_debug("Error detected and set to %d (cmd = %d, retries = %d)\n",
				 cmd->error, cmd->opcode, cmd->retries);
		}
	}
	else
		cmd->error = MMC_ERR_NONE;

	at91mci_process_next(host);
}
583
584/*
585 * Handle an MMC request
586 */
587static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
588{
589 struct at91mci_host *host = mmc_priv(mmc);
590 host->request = mrq;
591 host->flags = 0;
592
593 at91mci_process_next(host);
594}
595
/*
 * Set the IOS: clock rate, bus width, open-drain mode and (if the
 * board wires a VCC GPIO) card power.
 */
static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int clkdiv;
	struct at91mci_host *host = mmc_priv(mmc);
	unsigned long at91_master_clock = clk_get_rate(host->mci_clk);

	host->bus_mode = ios->bus_mode;	/* consumed by at91_mci_send_command() */

	if (ios->clock == 0) {
		/* Disable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS);
		clkdiv = 0;
	}
	else {
		/* Enable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);

		/* MCI clock = master clock / (2 * (clkdiv + 1)); when the
		 * division is inexact, round so we never exceed ios->clock */
		if ((at91_master_clock % (ios->clock * 2)) == 0)
			clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
		else
			clkdiv = (at91_master_clock / ios->clock) / 2;

		pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
			 at91_master_clock / (2 * (clkdiv + 1)));
	}
	/* 4-bit mode only when both the core asks for it and the board wires it */
	if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
		pr_debug("MMC: Setting controller bus width to 4\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
	}
	else {
		pr_debug("MMC: Setting controller bus width to 1\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
	}

	/* Set the clock divider */
	at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);

	/* maybe switch power to the card */
	if (host->board->vcc_pin) {
		switch (ios->power_mode) {
		case MMC_POWER_OFF:
			at91_set_gpio_output(host->board->vcc_pin, 0);
			break;
		case MMC_POWER_UP:
		case MMC_POWER_ON:
			at91_set_gpio_output(host->board->vcc_pin, 1);
			break;
		}
	}
}
649
650/*
651 * Handle an interrupt
652 */
7d12e780 653static irqreturn_t at91_mci_irq(int irq, void *devid)
65dbf343
AV
654{
655 struct at91mci_host *host = devid;
656 int completed = 0;
df05a303 657 unsigned int int_status, int_mask;
65dbf343 658
e0b19b83 659 int_status = at91_mci_read(host, AT91_MCI_SR);
df05a303
AV
660 int_mask = at91_mci_read(host, AT91_MCI_IMR);
661
662 pr_debug("MCI irq: status = %08X, %08lX, %08lX\n", int_status, int_mask,
663 int_status & int_mask);
664
665 int_status = int_status & int_mask;
666
667 if (int_status & AT91_MCI_ERRORS) {
65dbf343 668 completed = 1;
df05a303
AV
669
670 if (int_status & AT91_MCI_UNRE)
671 pr_debug("MMC: Underrun error\n");
672 if (int_status & AT91_MCI_OVRE)
673 pr_debug("MMC: Overrun error\n");
674 if (int_status & AT91_MCI_DTOE)
675 pr_debug("MMC: Data timeout\n");
676 if (int_status & AT91_MCI_DCRCE)
677 pr_debug("MMC: CRC error in data\n");
678 if (int_status & AT91_MCI_RTOE)
679 pr_debug("MMC: Response timeout\n");
680 if (int_status & AT91_MCI_RENDE)
681 pr_debug("MMC: Response end bit error\n");
682 if (int_status & AT91_MCI_RCRCE)
683 pr_debug("MMC: Response CRC error\n");
684 if (int_status & AT91_MCI_RDIRE)
685 pr_debug("MMC: Response direction error\n");
686 if (int_status & AT91_MCI_RINDE)
687 pr_debug("MMC: Response index error\n");
688 } else {
689 /* Only continue processing if no errors */
65dbf343 690
65dbf343 691 if (int_status & AT91_MCI_TXBUFE) {
b44fb7a0 692 pr_debug("TX buffer empty\n");
65dbf343
AV
693 at91_mci_handle_transmitted(host);
694 }
695
696 if (int_status & AT91_MCI_RXBUFF) {
b44fb7a0 697 pr_debug("RX buffer full\n");
e0b19b83 698 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_CMDRDY);
65dbf343
AV
699 }
700
df05a303 701 if (int_status & AT91_MCI_ENDTX)
b44fb7a0 702 pr_debug("Transmit has ended\n");
65dbf343
AV
703
704 if (int_status & AT91_MCI_ENDRX) {
b44fb7a0 705 pr_debug("Receive has ended\n");
65dbf343
AV
706 at91mci_post_dma_read(host);
707 }
708
709 if (int_status & AT91_MCI_NOTBUSY) {
b44fb7a0 710 pr_debug("Card is ready\n");
e0b19b83 711 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_CMDRDY);
65dbf343
AV
712 }
713
df05a303 714 if (int_status & AT91_MCI_DTIP)
b44fb7a0 715 pr_debug("Data transfer in progress\n");
65dbf343 716
df05a303 717 if (int_status & AT91_MCI_BLKE)
b44fb7a0 718 pr_debug("Block transfer has ended\n");
65dbf343 719
df05a303 720 if (int_status & AT91_MCI_TXRDY)
b44fb7a0 721 pr_debug("Ready to transmit\n");
65dbf343 722
df05a303 723 if (int_status & AT91_MCI_RXRDY)
b44fb7a0 724 pr_debug("Ready to receive\n");
65dbf343
AV
725
726 if (int_status & AT91_MCI_CMDRDY) {
b44fb7a0 727 pr_debug("Command ready\n");
65dbf343
AV
728 completed = 1;
729 }
730 }
65dbf343
AV
731
732 if (completed) {
b44fb7a0 733 pr_debug("Completed command\n");
e0b19b83 734 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
65dbf343 735 at91mci_completed_command(host);
df05a303
AV
736 } else
737 at91_mci_write(host, AT91_MCI_IDR, int_status);
65dbf343
AV
738
739 return IRQ_HANDLED;
740}
741
/* Card-detect GPIO interrupt: fires on both insert and remove */
static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
{
	struct at91mci_host *host = _host;
	int present = !at91_get_gpio_value(irq);

	/*
	 * we expect this irq on both insert and remove,
	 * and use a short delay to debounce.
	 */
	if (present != host->present) {
		host->present = present;
		pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
			present ? "insert" : "remove");
		if (!present) {
			pr_debug("****** Resetting SD-card bus width ******\n");
			at91_mci_write(host, AT91_MCI_SDCR, 0);
		}
		/* 100 ms debounce before the core rescans the slot */
		mmc_detect_change(host->mmc, msecs_to_jiffies(100));
	}
	return IRQ_HANDLED;
}
763
/*
 * Report the card's write-protect state via the board's WP GPIO.
 * Returns nonzero if read-only; 0 if writable or if no WP pin is wired.
 */
int at91_mci_get_ro(struct mmc_host *mmc)
{
	int read_only = 0;
	struct at91mci_host *host = mmc_priv(mmc);

	if (host->board->wp_pin) {
		read_only = at91_get_gpio_value(host->board->wp_pin);
		printk(KERN_WARNING "%s: card is %s\n", mmc_hostname(mmc),
				(read_only ? "read-only" : "read-write") );
	}
	else {
		printk(KERN_WARNING "%s: host does not support reading read-only "
				"switch. Assuming write-enable.\n", mmc_hostname(mmc));
	}
	return read_only;
}
780
/* Host operations handed to the MMC core: the driver's three entry points */
static const struct mmc_host_ops at91_mci_ops = {
	.request	= at91_mci_request,
	.set_ios	= at91_mci_set_ios,
	.get_ro		= at91_mci_get_ro,
};
786
787/*
788 * Probe for the device
789 */
790static int at91_mci_probe(struct platform_device *pdev)
791{
792 struct mmc_host *mmc;
793 struct at91mci_host *host;
17ea0595 794 struct resource *res;
65dbf343
AV
795 int ret;
796
b44fb7a0 797 pr_debug("Probe MCI devices\n");
65dbf343 798
17ea0595
AV
799 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
800 if (!res)
801 return -ENXIO;
802
803 if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
804 return -EBUSY;
805
65dbf343
AV
806 mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
807 if (!mmc) {
b44fb7a0 808 pr_debug("Failed to allocate mmc host\n");
17ea0595 809 release_mem_region(res->start, res->end - res->start + 1);
65dbf343
AV
810 return -ENOMEM;
811 }
812
813 mmc->ops = &at91_mci_ops;
814 mmc->f_min = 375000;
815 mmc->f_max = 25000000;
816 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
42431acb 817 mmc->caps = MMC_CAP_BYTEBLOCK;
65dbf343
AV
818
819 host = mmc_priv(mmc);
820 host->mmc = mmc;
821 host->buffer = NULL;
822 host->bus_mode = 0;
823 host->board = pdev->dev.platform_data;
824 if (host->board->wire4) {
825#ifdef SUPPORT_4WIRE
826 mmc->caps |= MMC_CAP_4_BIT_DATA;
827#else
828 printk("MMC: 4 wire bus mode not supported by this driver - using 1 wire\n");
829#endif
830 }
831
832 /*
833 * Get Clock
834 */
3dd3b039
AV
835 host->mci_clk = clk_get(&pdev->dev, "mci_clk");
836 if (IS_ERR(host->mci_clk)) {
65dbf343 837 printk(KERN_ERR "AT91 MMC: no clock defined.\n");
b44fb7a0 838 mmc_free_host(mmc);
17ea0595 839 release_mem_region(res->start, res->end - res->start + 1);
65dbf343
AV
840 return -ENODEV;
841 }
65dbf343 842
17ea0595
AV
843 /*
844 * Map I/O region
845 */
846 host->baseaddr = ioremap(res->start, res->end - res->start + 1);
847 if (!host->baseaddr) {
3dd3b039 848 clk_put(host->mci_clk);
17ea0595
AV
849 mmc_free_host(mmc);
850 release_mem_region(res->start, res->end - res->start + 1);
851 return -ENOMEM;
852 }
e0b19b83
AV
853
854 /*
855 * Reset hardware
856 */
3dd3b039 857 clk_enable(host->mci_clk); /* Enable the peripheral clock */
e0b19b83
AV
858 at91_mci_disable(host);
859 at91_mci_enable(host);
860
65dbf343
AV
861 /*
862 * Allocate the MCI interrupt
863 */
17ea0595
AV
864 host->irq = platform_get_irq(pdev, 0);
865 ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED, DRIVER_NAME, host);
65dbf343 866 if (ret) {
b44fb7a0 867 printk(KERN_ERR "Failed to request MCI interrupt\n");
3dd3b039
AV
868 clk_disable(host->mci_clk);
869 clk_put(host->mci_clk);
b44fb7a0 870 mmc_free_host(mmc);
17ea0595
AV
871 iounmap(host->baseaddr);
872 release_mem_region(res->start, res->end - res->start + 1);
65dbf343
AV
873 return ret;
874 }
875
876 platform_set_drvdata(pdev, mmc);
877
878 /*
879 * Add host to MMC layer
880 */
881 if (host->board->det_pin)
882 host->present = !at91_get_gpio_value(host->board->det_pin);
883 else
884 host->present = -1;
885
886 mmc_add_host(mmc);
887
888 /*
889 * monitor card insertion/removal if we can
890 */
891 if (host->board->det_pin) {
892 ret = request_irq(host->board->det_pin, at91_mmc_det_irq,
b44fb7a0 893 0, DRIVER_NAME, host);
65dbf343 894 if (ret)
b44fb7a0 895 printk(KERN_ERR "couldn't allocate MMC detect irq\n");
65dbf343
AV
896 }
897
b44fb7a0 898 pr_debug(KERN_INFO "Added MCI driver\n");
65dbf343
AV
899
900 return 0;
901}
902
/*
 * Remove a device: tear down in reverse order of probe.
 */
static int at91_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host;
	struct resource *res;

	if (!mmc)
		return -1;

	host = mmc_priv(mmc);

	/* present != -1 implies a detect pin was wired (see probe), so its
	 * irq must be freed and any pending detect work cancelled first */
	if (host->present != -1) {
		free_irq(host->board->det_pin, host);
		cancel_delayed_work(&host->mmc->detect);
	}

	at91_mci_disable(host);
	mmc_remove_host(mmc);
	free_irq(host->irq, host);

	clk_disable(host->mci_clk);			/* Disable the peripheral clock */
	clk_put(host->mci_clk);

	iounmap(host->baseaddr);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, res->end - res->start + 1);

	mmc_free_host(mmc);
	platform_set_drvdata(pdev, NULL);
	pr_debug("MCI Removed\n");

	return 0;
}
939
#ifdef CONFIG_PM
/* Forward system suspend to the MMC core so the card is quiesced */
static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	int ret = 0;

	if (mmc)
		ret = mmc_suspend_host(mmc, state);

	return ret;
}

/* Forward system resume to the MMC core */
static int at91_mci_resume(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	int ret = 0;

	if (mmc)
		ret = mmc_resume_host(mmc);

	return ret;
}
#else
/* Without CONFIG_PM the driver registers no PM callbacks */
#define at91_mci_suspend	NULL
#define at91_mci_resume		NULL
#endif
966
/* Platform driver glue; matched to the platform device by DRIVER_NAME */
static struct platform_driver at91_mci_driver = {
	.probe		= at91_mci_probe,
	.remove		= at91_mci_remove,
	.suspend	= at91_mci_suspend,	/* NULL when !CONFIG_PM */
	.resume		= at91_mci_resume,	/* NULL when !CONFIG_PM */
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};
977
/* Module entry point: register the platform driver */
static int __init at91_mci_init(void)
{
	return platform_driver_register(&at91_mci_driver);
}

/* Module exit: unregister the platform driver */
static void __exit at91_mci_exit(void)
{
	platform_driver_unregister(&at91_mci_driver);
}

module_init(at91_mci_init);
module_exit(at91_mci_exit);
990
991MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
992MODULE_AUTHOR("Nick Randell");
993MODULE_LICENSE("GPL");