mmc: at91_mci typo
drivers/mmc/host/at91_mci.c
/*
 *  linux/drivers/mmc/at91_mci.c - ATMEL AT91 MCI Driver
 *
 *  Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
 *
 *  Copyright (C) 2006 Malcolm Noyes
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
   This is the AT91 MCI driver that has been tested with both MMC cards
   and SD-cards.  Boards that support write protect are now supported.
   The CCAT91SBC001 board does not support SD cards.

   The three entry points are at91_mci_request, at91_mci_set_ios
   and at91_mci_get_ro.

   SET IOS
     This configures the device to put it into the correct mode and clock speed
     required.

   MCI REQUEST
     MCI request processes the commands sent in the mmc_request structure. This
     can consist of a processing command and a stop command in the case of
     multiple block transfers.

     There are three main types of request: commands, reads and writes.

     Commands are straightforward. The command is submitted to the controller
     and the request function returns. When the controller generates an
     interrupt to indicate the command is finished, the response to the command
     is read and mmc_request_done is called to end the request.

     Reads and writes work in a similar manner to normal commands but involve
     the PDC (DMA) controller to manage the transfers.

     A read is done from the controller directly to the scatterlist passed in
     from the request.  Due to a bug in the AT91RM9200 controller, when a read
     is completed, all the words are byte swapped in the scatterlist buffers.
     AT91SAM926x devices are not affected by this bug.

     The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY

     A write is slightly different in that the bytes to write are read from the
     scatterlist into a DMA memory buffer (this is in case the source buffer
     should be read-only).  The entire write is then performed from this single
     DMA memory buffer.

     The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY

   GET RO
     Gets the status of the write protect pin, if available.
*/
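
/*
 * Rough sketch of the request sequencing, derived from the code below (not
 * from the datasheet): at91_mci_request() stores the mmc_request and calls
 * at91_mci_process_next(), which uses FL_SENT_COMMAND/FL_SENT_STOP to decide
 * what to send next.
 *
 *	at91_mci_request(mrq)
 *	  -> process_next():  send mrq->cmd       (sets FL_SENT_COMMAND)
 *	  -> irq: CMDRDY      -> completed_command()
 *	  -> process_next():  send mrq->stop      (sets FL_SENT_STOP, if present)
 *	  -> irq: CMDRDY      -> completed_command()
 *	  -> process_next():  mmc_request_done()
 *
 * For data commands, CMDRDY is only re-enabled once the data phase interrupts
 * (see the read/write sequences above) have completed.
 */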

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/atmel_pdc.h>

#include <linux/mmc/host.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/mach/mmc.h>
#include <asm/arch/board.h>
#include <asm/arch/cpu.h>
#include <asm/arch/gpio.h>
#include <asm/arch/at91_mci.h>

#define DRIVER_NAME "at91_mci"

#undef SUPPORT_4WIRE

#define FL_SENT_COMMAND	(1 << 0)
#define FL_SENT_STOP	(1 << 1)

#define AT91_MCI_ERRORS	(AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE	\
		| AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE		\
		| AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)

#define at91_mci_read(host, reg)	__raw_readl((host)->baseaddr + (reg))
#define at91_mci_write(host, reg, val)	__raw_writel((val), (host)->baseaddr + (reg))


/*
 * Low level type for this driver
 */
struct at91mci_host
{
	struct mmc_host *mmc;
	struct mmc_command *cmd;
	struct mmc_request *request;

	void __iomem *baseaddr;
	int irq;

	struct at91_mmc_data *board;
	int present;

	struct clk *mci_clk;

	/*
	 * Flag indicating when the command has been sent. This is used to
	 * work out whether or not to send the stop
	 */
	unsigned int flags;
	/* flag for current bus settings */
	u32 bus_mode;

	/* DMA buffer used for transmitting */
	unsigned int *buffer;
	dma_addr_t physical_address;
	unsigned int total_length;

	/* Latest in the scatterlist that has been enabled for transfer, but not freed */
	int in_use_index;

	/* Latest in the scatterlist that has been enabled for transfer */
	int transfer_index;
};

/*
 * Copy from sg to a dma block - used for transfers
 */
static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
{
	unsigned int len, i, size;
	unsigned *dmabuf = host->buffer;

	size = host->total_length;
	len = data->sg_len;

	/*
	 * Just loop through all entries. Size might not
	 * be the entire list though so make sure that
	 * we do not transfer too much.
	 */
	for (i = 0; i < len; i++) {
		struct scatterlist *sg;
		int amount;
		unsigned int *sgbuffer;

		sg = &data->sg[i];

		sgbuffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
		amount = min(size, sg->length);
		size -= amount;

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			int index;

			for (index = 0; index < (amount / 4); index++)
				*dmabuf++ = swab32(sgbuffer[index]);
		}
		else {
			memcpy(dmabuf, sgbuffer, amount);
			/* advance past the data just copied, so that a
			 * multi-entry scatterlist does not overwrite the
			 * start of the DMA buffer */
			dmabuf = (unsigned *)((char *)dmabuf + amount);
		}

		kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);

		if (size == 0)
			break;
	}

	/*
	 * Check that we didn't get a request to transfer
	 * more data than can fit into the SG list.
	 */
	BUG_ON(size != 0);
}
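
/*
 * Note on the errata handling above: swab32() reverses the byte order of a
 * 32-bit word (e.g. swab32(0x12345678) == 0x78563412).  Pre-swapping here
 * compensates for the AT91RM9200 byte-swap bug described at the top of this
 * file; at91_mci_post_dma_read() does the equivalent for the read direction.
 */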

/*
 * Prepare a dma read
 */
static void at91_mci_pre_dma_read(struct at91mci_host *host)
{
	int i;
	struct scatterlist *sg;
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("pre dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	for (i = 0; i < 2; i++) {
		/* nothing left to transfer */
		if (host->transfer_index >= data->sg_len) {
			pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
			break;
		}

		/* Check to see if this needs filling */
		if (i == 0) {
			if (at91_mci_read(host, ATMEL_PDC_RCR) != 0) {
				pr_debug("Transfer active in current\n");
				continue;
			}
		}
		else {
			if (at91_mci_read(host, ATMEL_PDC_RNCR) != 0) {
				pr_debug("Transfer active in next\n");
				continue;
			}
		}

		/* Setup the next transfer */
		pr_debug("Using transfer index %d\n", host->transfer_index);

		sg = &data->sg[host->transfer_index++];
		pr_debug("sg = %p\n", sg);

		sg->dma_address = dma_map_page(NULL, sg->page, sg->offset, sg->length, DMA_FROM_DEVICE);

		pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);

		if (i == 0) {
			at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address);
			at91_mci_write(host, ATMEL_PDC_RCR, sg->length / 4);
		}
		else {
			at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address);
			at91_mci_write(host, ATMEL_PDC_RNCR, sg->length / 4);
		}
	}

	pr_debug("pre dma read done\n");
}
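
/*
 * The loop above runs twice because the PDC is double buffered: RPR/RCR hold
 * the scatterlist entry currently being received, while RNPR/RNCR hold the
 * next one, so up to two entries can be queued per call.
 */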

/*
 * Handle after a dma read
 */
static void at91_mci_post_dma_read(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("post dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	while (host->in_use_index < host->transfer_index) {
		unsigned int *buffer;

		struct scatterlist *sg;

		pr_debug("finishing index %d\n", host->in_use_index);

		sg = &data->sg[host->in_use_index++];

		pr_debug("Unmapping page %08X\n", sg->dma_address);

		dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);

		/* Swap the contents of the buffer */
		buffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
		pr_debug("buffer = %p, length = %d\n", buffer, sg->length);

		data->bytes_xfered += sg->length;

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			int index;

			for (index = 0; index < (sg->length / 4); index++)
				buffer[index] = swab32(buffer[index]);
		}

		kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
		flush_dcache_page(sg->page);
	}

	/* Is there another transfer to trigger? */
	if (host->transfer_index < data->sg_len)
		at91_mci_pre_dma_read(host);
	else {
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
	}

	pr_debug("post dma read done\n");
}

/*
 * Handle transmitted data
 */
static void at91_mci_handle_transmitted(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("Handling the transmit\n");

	/* Disable the transfer */
	at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);

	/* Now wait for cmd ready */
	at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE);
	at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);

	cmd = host->cmd;
	if (!cmd) return;

	data = cmd->data;
	if (!data) return;

	data->bytes_xfered = host->total_length;
}
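
/*
 * Write completion, as wired up above and in the interrupt handler: TXBUFE
 * fires once the PDC has drained the DMA buffer, the transmitter is then
 * disabled and NOTBUSY enabled; when the card stops signalling busy, NOTBUSY
 * re-arms CMDRDY and the command is completed.
 */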

/*
 * Enable the controller
 */
static void at91_mci_enable(struct at91mci_host *host)
{
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
	at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
	at91_mci_write(host, AT91_MCI_MR, AT91_MCI_PDCMODE | 0x34a);

	/* use Slot A or B (only one at same time) */
	at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b);
}

/*
 * Disable the controller
 */
static void at91_mci_disable(struct at91mci_host *host)
{
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
}

/*
 * Send a command
 * return the interrupts to enable
 */
static unsigned int at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
{
	unsigned int cmdr, mr;
	unsigned int block_length;
	struct mmc_data *data = cmd->data;

	unsigned int blocks;
	unsigned int ier = 0;

	host->cmd = cmd;

	/* Not sure if this is needed */
#if 0
	if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
		pr_debug("Clearing timeout\n");
		at91_mci_write(host, AT91_MCI_ARGR, 0);
		at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
		while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
			/* spin */
			pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
		}
	}
#endif
	cmdr = cmd->opcode;

	if (mmc_resp_type(cmd) == MMC_RSP_NONE)
		cmdr |= AT91_MCI_RSPTYP_NONE;
	else {
		/* if a response is expected then allow maximum response latency */
		cmdr |= AT91_MCI_MAXLAT;
		/* set 136 bit response for R2, 48 bit response otherwise */
		if (mmc_resp_type(cmd) == MMC_RSP_R2)
			cmdr |= AT91_MCI_RSPTYP_136;
		else
			cmdr |= AT91_MCI_RSPTYP_48;
	}

	if (data) {
		block_length = data->blksz;
		blocks = data->blocks;

		/* always set data start - also set direction flag for read */
		if (data->flags & MMC_DATA_READ)
			cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
		else if (data->flags & MMC_DATA_WRITE)
			cmdr |= AT91_MCI_TRCMD_START;

		if (data->flags & MMC_DATA_STREAM)
			cmdr |= AT91_MCI_TRTYP_STREAM;
		if (data->flags & MMC_DATA_MULTI)
			cmdr |= AT91_MCI_TRTYP_MULTIPLE;
	}
	else {
		block_length = 0;
		blocks = 0;
	}

	if (host->flags & FL_SENT_STOP)
		cmdr |= AT91_MCI_TRCMD_STOP;

	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdr |= AT91_MCI_OPDCMD;

	/*
	 * Set the arguments and send the command
	 */
	pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n",
		cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));

	if (!data) {
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS | ATMEL_PDC_RXTDIS);
		at91_mci_write(host, ATMEL_PDC_RPR, 0);
		at91_mci_write(host, ATMEL_PDC_RCR, 0);
		at91_mci_write(host, ATMEL_PDC_RNPR, 0);
		at91_mci_write(host, ATMEL_PDC_RNCR, 0);
		at91_mci_write(host, ATMEL_PDC_TPR, 0);
		at91_mci_write(host, ATMEL_PDC_TCR, 0);
		at91_mci_write(host, ATMEL_PDC_TNPR, 0);
		at91_mci_write(host, ATMEL_PDC_TNCR, 0);

		at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
		at91_mci_write(host, AT91_MCI_CMDR, cmdr);
		return AT91_MCI_CMDRDY;
	}

	mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff;	/* zero block length and PDC mode */
	at91_mci_write(host, AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE);

	/*
	 * Disable the PDC controller
	 */
	at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);

	if (cmdr & AT91_MCI_TRCMD_START) {
		data->bytes_xfered = 0;
		host->transfer_index = 0;
		host->in_use_index = 0;
		if (cmdr & AT91_MCI_TRDIR) {
			/*
			 * Handle a read
			 */
			host->buffer = NULL;
			host->total_length = 0;

			at91_mci_pre_dma_read(host);
			ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
		}
		else {
			/*
			 * Handle a write
			 */
			host->total_length = block_length * blocks;
			host->buffer = dma_alloc_coherent(NULL,
				host->total_length,
				&host->physical_address, GFP_KERNEL);

			at91_mci_sg_to_dma(host, data);

			pr_debug("Transmitting %d bytes\n", host->total_length);

			at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
			at91_mci_write(host, ATMEL_PDC_TCR, host->total_length / 4);
			ier = AT91_MCI_TXBUFE;
		}
	}

	/*
	 * Send the command and then enable the PDC - not the other way round as
	 * the data sheet says
	 */

	at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
	at91_mci_write(host, AT91_MCI_CMDR, cmdr);

	if (cmdr & AT91_MCI_TRCMD_START) {
		if (cmdr & AT91_MCI_TRDIR)
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
		else
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
	}
	return ier;
}
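
/*
 * Worked example of the CMDR composition above (illustrative values): a
 * single-block read such as CMD17 with an R1 response and data->flags ==
 * MMC_DATA_READ yields
 *
 *	cmdr = 17 | AT91_MCI_MAXLAT | AT91_MCI_RSPTYP_48
 *		  | AT91_MCI_TRCMD_START | AT91_MCI_TRDIR;
 *
 * and the function returns AT91_MCI_ENDRX as the interrupt to wait for.
 */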

/*
 * Wait for a command to complete
 */
static void at91_mci_process_command(struct at91mci_host *host, struct mmc_command *cmd)
{
	unsigned int ier;

	ier = at91_mci_send_command(host, cmd);

	pr_debug("setting ier to %08X\n", ier);

	/* Stop on errors or the required value */
	at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier);
}

/*
 * Process the next step in the request
 */
static void at91_mci_process_next(struct at91mci_host *host)
{
	if (!(host->flags & FL_SENT_COMMAND)) {
		host->flags |= FL_SENT_COMMAND;
		at91_mci_process_command(host, host->request->cmd);
	}
	else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
		host->flags |= FL_SENT_STOP;
		at91_mci_process_command(host, host->request->stop);
	}
	else
		mmc_request_done(host->mmc, host->request);
}

/*
 * Handle a command that has been completed
 */
static void at91_mci_completed_command(struct at91mci_host *host)
{
	struct mmc_command *cmd = host->cmd;
	unsigned int status;

	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);

	cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
	cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
	cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
	cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));

	if (host->buffer) {
		dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address);
		host->buffer = NULL;
	}

	status = at91_mci_read(host, AT91_MCI_SR);

	pr_debug("Status = %08X [%08X %08X %08X %08X]\n",
		 status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);

	if (status & (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE |
			AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE |
			AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)) {
		if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) {
			cmd->error = MMC_ERR_NONE;
		}
		else {
			if (status & (AT91_MCI_RTOE | AT91_MCI_DTOE))
				cmd->error = MMC_ERR_TIMEOUT;
			else if (status & (AT91_MCI_RCRCE | AT91_MCI_DCRCE))
				cmd->error = MMC_ERR_BADCRC;
			else if (status & (AT91_MCI_OVRE | AT91_MCI_UNRE))
				cmd->error = MMC_ERR_FIFO;
			else
				cmd->error = MMC_ERR_FAILED;

			pr_debug("Error detected and set to %d (cmd = %d, retries = %d)\n",
				 cmd->error, cmd->opcode, cmd->retries);
		}
	}
	else
		cmd->error = MMC_ERR_NONE;

	at91_mci_process_next(host);
}

/*
 * Handle an MMC request
 */
static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct at91mci_host *host = mmc_priv(mmc);
	host->request = mrq;
	host->flags = 0;

	at91_mci_process_next(host);
}

/*
 * Set the IOS
 */
static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int clkdiv;
	struct at91mci_host *host = mmc_priv(mmc);
	unsigned long at91_master_clock = clk_get_rate(host->mci_clk);

	host->bus_mode = ios->bus_mode;

	if (ios->clock == 0) {
		/* Disable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS);
		clkdiv = 0;
	}
	else {
		/* Enable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);

		if ((at91_master_clock % (ios->clock * 2)) == 0)
			clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
		else
			clkdiv = (at91_master_clock / ios->clock) / 2;

		pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
			 at91_master_clock / (2 * (clkdiv + 1)));
	}
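
	/*
	 * Example with illustrative numbers: with a 60 MHz master clock and a
	 * 25 MHz request, 60000000 % 50000000 != 0, so clkdiv = (60 / 25) / 2
	 * = 1 and the card clock becomes 60 MHz / (2 * (1 + 1)) = 15 MHz.
	 */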
	if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
		pr_debug("MMC: Setting controller bus width to 4\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
	}
	else {
		pr_debug("MMC: Setting controller bus width to 1\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
	}

	/* Set the clock divider */
	at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);

	/* maybe switch power to the card */
	if (host->board->vcc_pin) {
		switch (ios->power_mode) {
			case MMC_POWER_OFF:
				at91_set_gpio_value(host->board->vcc_pin, 0);
				break;
			case MMC_POWER_UP:
			case MMC_POWER_ON:
				at91_set_gpio_value(host->board->vcc_pin, 1);
				break;
		}
	}
}

/*
 * Handle an interrupt
 */
static irqreturn_t at91_mci_irq(int irq, void *devid)
{
	struct at91mci_host *host = devid;
	int completed = 0;
	unsigned int int_status, int_mask;

	int_status = at91_mci_read(host, AT91_MCI_SR);
	int_mask = at91_mci_read(host, AT91_MCI_IMR);

	pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask,
		int_status & int_mask);

	int_status = int_status & int_mask;

	if (int_status & AT91_MCI_ERRORS) {
		completed = 1;

		if (int_status & AT91_MCI_UNRE)
			pr_debug("MMC: Underrun error\n");
		if (int_status & AT91_MCI_OVRE)
			pr_debug("MMC: Overrun error\n");
		if (int_status & AT91_MCI_DTOE)
			pr_debug("MMC: Data timeout\n");
		if (int_status & AT91_MCI_DCRCE)
			pr_debug("MMC: CRC error in data\n");
		if (int_status & AT91_MCI_RTOE)
			pr_debug("MMC: Response timeout\n");
		if (int_status & AT91_MCI_RENDE)
			pr_debug("MMC: Response end bit error\n");
		if (int_status & AT91_MCI_RCRCE)
			pr_debug("MMC: Response CRC error\n");
		if (int_status & AT91_MCI_RDIRE)
			pr_debug("MMC: Response direction error\n");
		if (int_status & AT91_MCI_RINDE)
			pr_debug("MMC: Response index error\n");
	} else {
		/* Only continue processing if no errors */

		if (int_status & AT91_MCI_TXBUFE) {
			pr_debug("TX buffer empty\n");
			at91_mci_handle_transmitted(host);
		}

		if (int_status & AT91_MCI_RXBUFF) {
			pr_debug("RX buffer full\n");
			at91_mci_write(host, AT91_MCI_IER, AT91_MCI_CMDRDY);
		}

		if (int_status & AT91_MCI_ENDTX)
			pr_debug("Transmit has ended\n");

		if (int_status & AT91_MCI_ENDRX) {
			pr_debug("Receive has ended\n");
			at91_mci_post_dma_read(host);
		}

		if (int_status & AT91_MCI_NOTBUSY) {
			pr_debug("Card is ready\n");
			at91_mci_write(host, AT91_MCI_IER, AT91_MCI_CMDRDY);
		}

		if (int_status & AT91_MCI_DTIP)
			pr_debug("Data transfer in progress\n");

		if (int_status & AT91_MCI_BLKE)
			pr_debug("Block transfer has ended\n");

		if (int_status & AT91_MCI_TXRDY)
			pr_debug("Ready to transmit\n");

		if (int_status & AT91_MCI_RXRDY)
			pr_debug("Ready to receive\n");

		if (int_status & AT91_MCI_CMDRDY) {
			pr_debug("Command ready\n");
			completed = 1;
		}
	}

	if (completed) {
		pr_debug("Completed command\n");
		at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
		at91_mci_completed_command(host);
	} else
		at91_mci_write(host, AT91_MCI_IDR, int_status);

	return IRQ_HANDLED;
}

static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
{
	struct at91mci_host *host = _host;
	int present = !at91_get_gpio_value(irq);

	/*
	 * we expect this irq on both insert and remove,
	 * and use a short delay to debounce.
	 */
	if (present != host->present) {
		host->present = present;
		pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
			present ? "insert" : "remove");
		if (!present) {
			pr_debug("****** Resetting SD-card bus width ******\n");
			at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
		}
		mmc_detect_change(host->mmc, msecs_to_jiffies(100));
	}
	return IRQ_HANDLED;
}

static int at91_mci_get_ro(struct mmc_host *mmc)
{
	int read_only = 0;
	struct at91mci_host *host = mmc_priv(mmc);

	if (host->board->wp_pin) {
		read_only = at91_get_gpio_value(host->board->wp_pin);
		printk(KERN_WARNING "%s: card is %s\n", mmc_hostname(mmc),
				(read_only ? "read-only" : "read-write"));
	}
	else {
		printk(KERN_WARNING "%s: host does not support reading read-only "
				"switch. Assuming write-enable.\n", mmc_hostname(mmc));
	}
	return read_only;
}

static const struct mmc_host_ops at91_mci_ops = {
	.request	= at91_mci_request,
	.set_ios	= at91_mci_set_ios,
	.get_ro		= at91_mci_get_ro,
};
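
/*
 * For reference, a board file normally hooks this driver up by providing
 * at91_mmc_data platform data (typically registered via at91_add_device_mmc()
 * in the board setup code).  The sketch below is illustrative only - the pin
 * numbers are invented - and mirrors the fields this driver actually reads:
 * det_pin, wp_pin, vcc_pin, wire4 and slot_b.
 */
#if 0
static struct at91_mmc_data __initdata ek_mmc_data = {
	.slot_b		= 0,			/* use slot A */
	.wire4		= 1,			/* 4-bit bus, if SUPPORT_4WIRE is enabled */
	.det_pin	= AT91_PIN_PB27,	/* card detect (example pin) */
	.wp_pin		= AT91_PIN_PA17,	/* write protect (example pin) */
	.vcc_pin	= 0,			/* no switchable card supply */
};
#endif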

/*
 * Probe for the device
 */
static int __init at91_mci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct at91mci_host *host;
	struct resource *res;
	int ret;

	pr_debug("Probe MCI devices\n");

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
	if (!mmc) {
		pr_debug("Failed to allocate mmc host\n");
		release_mem_region(res->start, res->end - res->start + 1);
		return -ENOMEM;
	}

	mmc->ops = &at91_mci_ops;
	mmc->f_min = 375000;
	mmc->f_max = 25000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = MMC_CAP_BYTEBLOCK;

	mmc->max_blk_size = 4095;
	mmc->max_blk_count = mmc->max_req_size;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->buffer = NULL;
	host->bus_mode = 0;
	host->board = pdev->dev.platform_data;
	if (host->board->wire4) {
#ifdef SUPPORT_4WIRE
		mmc->caps |= MMC_CAP_4_BIT_DATA;
#else
		printk("AT91 MMC: 4 wire bus mode not supported by this driver - using 1 wire\n");
#endif
	}

	/*
	 * Get Clock
	 */
	host->mci_clk = clk_get(&pdev->dev, "mci_clk");
	if (IS_ERR(host->mci_clk)) {
		printk(KERN_ERR "AT91 MMC: no clock defined.\n");
		mmc_free_host(mmc);
		release_mem_region(res->start, res->end - res->start + 1);
		return -ENODEV;
	}

	/*
	 * Map I/O region
	 */
	host->baseaddr = ioremap(res->start, res->end - res->start + 1);
	if (!host->baseaddr) {
		clk_put(host->mci_clk);
		mmc_free_host(mmc);
		release_mem_region(res->start, res->end - res->start + 1);
		return -ENOMEM;
	}

	/*
	 * Reset hardware
	 */
	clk_enable(host->mci_clk);		/* Enable the peripheral clock */
	at91_mci_disable(host);
	at91_mci_enable(host);

	/*
	 * Allocate the MCI interrupt
	 */
	host->irq = platform_get_irq(pdev, 0);
	ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED, DRIVER_NAME, host);
	if (ret) {
		printk(KERN_ERR "AT91 MMC: Failed to request MCI interrupt\n");
		clk_disable(host->mci_clk);
		clk_put(host->mci_clk);
		mmc_free_host(mmc);
		iounmap(host->baseaddr);
		release_mem_region(res->start, res->end - res->start + 1);
		return ret;
	}

	platform_set_drvdata(pdev, mmc);

	/*
	 * Add host to MMC layer
	 */
	if (host->board->det_pin)
		host->present = !at91_get_gpio_value(host->board->det_pin);
	else
		host->present = -1;

	mmc_add_host(mmc);

	/*
	 * monitor card insertion/removal if we can
	 */
	if (host->board->det_pin) {
		ret = request_irq(host->board->det_pin, at91_mmc_det_irq,
				0, DRIVER_NAME, host);
		if (ret)
			printk(KERN_ERR "AT91 MMC: Couldn't allocate MMC detect irq\n");
	}

	pr_debug("Added MCI driver\n");

	return 0;
}
910
911/*
912 * Remove a device
913 */
a26b498c 914static int __exit at91_mci_remove(struct platform_device *pdev)
65dbf343
AV
915{
916 struct mmc_host *mmc = platform_get_drvdata(pdev);
917 struct at91mci_host *host;
17ea0595 918 struct resource *res;
65dbf343
AV
919
920 if (!mmc)
921 return -1;
922
923 host = mmc_priv(mmc);
924
925 if (host->present != -1) {
926 free_irq(host->board->det_pin, host);
927 cancel_delayed_work(&host->mmc->detect);
928 }
929
e0b19b83 930 at91_mci_disable(host);
17ea0595
AV
931 mmc_remove_host(mmc);
932 free_irq(host->irq, host);
65dbf343 933
3dd3b039
AV
934 clk_disable(host->mci_clk); /* Disable the peripheral clock */
935 clk_put(host->mci_clk);
65dbf343 936
17ea0595
AV
937 iounmap(host->baseaddr);
938 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
939 release_mem_region(res->start, res->end - res->start + 1);
65dbf343 940
17ea0595
AV
941 mmc_free_host(mmc);
942 platform_set_drvdata(pdev, NULL);
b44fb7a0 943 pr_debug("MCI Removed\n");
65dbf343
AV
944
945 return 0;
946}
947
948#ifdef CONFIG_PM
949static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
950{
951 struct mmc_host *mmc = platform_get_drvdata(pdev);
952 int ret = 0;
953
954 if (mmc)
955 ret = mmc_suspend_host(mmc, state);
956
957 return ret;
958}
959
960static int at91_mci_resume(struct platform_device *pdev)
961{
962 struct mmc_host *mmc = platform_get_drvdata(pdev);
963 int ret = 0;
964
965 if (mmc)
966 ret = mmc_resume_host(mmc);
967
968 return ret;
969}
970#else
971#define at91_mci_suspend NULL
972#define at91_mci_resume NULL
973#endif
974
975static struct platform_driver at91_mci_driver = {
a26b498c 976 .remove = __exit_p(at91_mci_remove),
65dbf343
AV
977 .suspend = at91_mci_suspend,
978 .resume = at91_mci_resume,
979 .driver = {
980 .name = DRIVER_NAME,
981 .owner = THIS_MODULE,
982 },
983};
984
985static int __init at91_mci_init(void)
986{
a26b498c 987 return platform_driver_probe(&at91_mci_driver, at91_mci_probe);
65dbf343
AV
988}
989
990static void __exit at91_mci_exit(void)
991{
992 platform_driver_unregister(&at91_mci_driver);
993}
994
995module_init(at91_mci_init);
996module_exit(at91_mci_exit);
997
998MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
999MODULE_AUTHOR("Nick Randell");
1000MODULE_LICENSE("GPL");