mmc: possible leak in mmc_read_ext_csd
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / mmc / host / at91_mci.c
CommitLineData
65dbf343 1/*
70f10482 2 * linux/drivers/mmc/host/at91_mci.c - ATMEL AT91 MCI Driver
65dbf343
AV
3 *
4 * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
5 *
6 * Copyright (C) 2006 Malcolm Noyes
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13/*
99eeb8df 14 This is the AT91 MCI driver that has been tested with both MMC cards
65dbf343
AV
15 and SD-cards. Boards that support write protect are now supported.
16 The CCAT91SBC001 board does not support SD cards.
17
18 The three entry points are at91_mci_request, at91_mci_set_ios
19 and at91_mci_get_ro.
20
21 SET IOS
22 This configures the device to put it into the correct mode and clock speed
23 required.
24
25 MCI REQUEST
26 MCI request processes the commands sent in the mmc_request structure. This
27 can consist of a processing command and a stop command in the case of
28 multiple block transfers.
29
30 There are three main types of request, commands, reads and writes.
31
32 Commands are straight forward. The command is submitted to the controller and
33 the request function returns. When the controller generates an interrupt to indicate
34 the command is finished, the response to the command are read and the mmc_request_done
35 function called to end the request.
36
37 Reads and writes work in a similar manner to normal commands but involve the PDC (DMA)
38 controller to manage the transfers.
39
40 A read is done from the controller directly to the scatterlist passed in from the request.
99eeb8df
AV
41 Due to a bug in the AT91RM9200 controller, when a read is completed, all the words are byte
42 swapped in the scatterlist buffers. AT91SAM926x are not affected by this bug.
65dbf343
AV
43
44 The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY
45
46 A write is slightly different in that the bytes to write are read from the scatterlist
47 into a dma memory buffer (this is in case the source buffer should be read only). The
48 entire write buffer is then done from this single dma memory buffer.
49
50 The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY
51
52 GET RO
53 Gets the status of the write protect pin, if available.
54*/
55
65dbf343
AV
56#include <linux/module.h>
57#include <linux/moduleparam.h>
58#include <linux/init.h>
59#include <linux/ioport.h>
60#include <linux/platform_device.h>
61#include <linux/interrupt.h>
62#include <linux/blkdev.h>
63#include <linux/delay.h>
64#include <linux/err.h>
65#include <linux/dma-mapping.h>
66#include <linux/clk.h>
93a3ddc2 67#include <linux/atmel_pdc.h>
65dbf343
AV
68
69#include <linux/mmc/host.h>
65dbf343
AV
70
71#include <asm/io.h>
72#include <asm/irq.h>
73#include <asm/mach/mmc.h>
74#include <asm/arch/board.h>
99eeb8df 75#include <asm/arch/cpu.h>
65dbf343 76#include <asm/arch/gpio.h>
55d8baee 77#include <asm/arch/at91_mci.h>
65dbf343
AV
78
79#define DRIVER_NAME "at91_mci"
80
df05a303
AV
81#define FL_SENT_COMMAND (1 << 0)
82#define FL_SENT_STOP (1 << 1)
65dbf343 83
df05a303
AV
84#define AT91_MCI_ERRORS (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE \
85 | AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE \
37b758e8 86 | AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)
65dbf343 87
e0b19b83
AV
88#define at91_mci_read(host, reg) __raw_readl((host)->baseaddr + (reg))
89#define at91_mci_write(host, reg, val) __raw_writel((val), (host)->baseaddr + (reg))
65dbf343 90
65dbf343
AV
91
/*
 * Low level type for this driver
 */
struct at91mci_host
{
	struct mmc_host *mmc;		/* MMC core handle backing this controller */
	struct mmc_command *cmd;	/* command currently in flight (NULL when idle) */
	struct mmc_request *request;	/* request currently being processed */

	void __iomem *baseaddr;		/* ioremapped MCI register base */
	int irq;			/* MCI peripheral interrupt number */

	struct at91_mmc_data *board;	/* board data: det/wp/vcc pins, wire4, slot_b */
	int present;			/* card-detect state; -1 when no det_pin */

	struct clk *mci_clk;		/* peripheral clock for the MCI block */

	/*
	 * Flag indicating when the command has been sent. This is used to
	 * work out whether or not to send the stop
	 */
	unsigned int flags;
	/* flag for current bus settings */
	u32 bus_mode;

	/* DMA buffer used for transmitting */
	unsigned int* buffer;
	dma_addr_t physical_address;
	unsigned int total_length;

	/* Latest in the scatterlist that has been enabled for transfer, but not freed */
	int in_use_index;

	/* Latest in the scatterlist that has been enabled for transfer */
	int transfer_index;
};
128
129/*
130 * Copy from sg to a dma block - used for transfers
131 */
e8d04d3d 132static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
65dbf343
AV
133{
134 unsigned int len, i, size;
135 unsigned *dmabuf = host->buffer;
136
137 size = host->total_length;
138 len = data->sg_len;
139
140 /*
141 * Just loop through all entries. Size might not
142 * be the entire list though so make sure that
143 * we do not transfer too much.
144 */
145 for (i = 0; i < len; i++) {
146 struct scatterlist *sg;
147 int amount;
65dbf343
AV
148 unsigned int *sgbuffer;
149
150 sg = &data->sg[i];
151
152 sgbuffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
153 amount = min(size, sg->length);
154 size -= amount;
65dbf343 155
99eeb8df
AV
156 if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */
157 int index;
158
159 for (index = 0; index < (amount / 4); index++)
160 *dmabuf++ = swab32(sgbuffer[index]);
161 }
162 else
163 memcpy(dmabuf, sgbuffer, amount);
65dbf343
AV
164
165 kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);
166
167 if (size == 0)
168 break;
169 }
170
171 /*
172 * Check that we didn't get a request to transfer
173 * more data than can fit into the SG list.
174 */
175 BUG_ON(size != 0);
176}
177
/*
 * Prepare a dma read
 *
 * Queues up to two scatterlist entries on the PDC: one in the "current"
 * RPR/RCR register pair and one in the "next" RNPR/RNCR pair.  Called again
 * from at91_mci_post_dma_read() as entries complete.
 */
static void at91_mci_pre_dma_read(struct at91mci_host *host)
{
	int i;
	struct scatterlist *sg;
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("pre dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	/* The PDC holds a current and a next buffer, hence at most 2 here */
	for (i = 0; i < 2; i++) {
		/* nothing left to transfer */
		if (host->transfer_index >= data->sg_len) {
			pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
			break;
		}

		/* Check to see if this needs filling */
		if (i == 0) {
			/* non-zero RCR means the current buffer is still busy */
			if (at91_mci_read(host, ATMEL_PDC_RCR) != 0) {
				pr_debug("Transfer active in current\n");
				continue;
			}
		}
		else {
			/* non-zero RNCR means the next buffer is already queued */
			if (at91_mci_read(host, ATMEL_PDC_RNCR) != 0) {
				pr_debug("Transfer active in next\n");
				continue;
			}
		}

		/* Setup the next transfer */
		pr_debug("Using transfer index %d\n", host->transfer_index);

		sg = &data->sg[host->transfer_index++];
		pr_debug("sg = %p\n", sg);

		/* Map for device writes; unmapped in at91_mci_post_dma_read() */
		sg->dma_address = dma_map_page(NULL, sg->page, sg->offset, sg->length, DMA_FROM_DEVICE);

		pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);

		if (i == 0) {
			at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address);
			/* PDC counts 32-bit words, not bytes */
			at91_mci_write(host, ATMEL_PDC_RCR, sg->length / 4);
		}
		else {
			at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address);
			at91_mci_write(host, ATMEL_PDC_RNCR, sg->length / 4);
		}
	}

	pr_debug("pre dma read done\n");
}
245
/*
 * Handle after a dma read
 *
 * Retires completed scatterlist entries (unmap, account bytes, undo the
 * RM9200 byte-swap errata), then either queues more entries or switches to
 * waiting for the final RXBUFF interrupt.
 */
static void at91_mci_post_dma_read(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("post dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	/* Retire every entry queued so far that has completed */
	while (host->in_use_index < host->transfer_index) {
		struct scatterlist *sg;

		pr_debug("finishing index %d\n", host->in_use_index);

		sg = &data->sg[host->in_use_index++];

		pr_debug("Unmapping page %08X\n", sg->dma_address);

		dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);

		data->bytes_xfered += sg->length;

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			unsigned int *buffer;
			int index;

			/* Swap the contents of the buffer */
			buffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
			pr_debug("buffer = %p, length = %d\n", buffer, sg->length);

			for (index = 0; index < (sg->length / 4); index++)
				buffer[index] = swab32(buffer[index]);

			kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
		}

		flush_dcache_page(sg->page);
	}

	/* Is there another transfer to trigger? */
	if (host->transfer_index < data->sg_len)
		at91_mci_pre_dma_read(host);
	else {
		/* all queued: stop watching ENDRX, wait for the final RXBUFF */
		at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_ENDRX);
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
	}

	pr_debug("post dma read done\n");
}
308
309/*
310 * Handle transmitted data
311 */
312static void at91_mci_handle_transmitted(struct at91mci_host *host)
313{
314 struct mmc_command *cmd;
315 struct mmc_data *data;
316
b44fb7a0 317 pr_debug("Handling the transmit\n");
65dbf343
AV
318
319 /* Disable the transfer */
93a3ddc2 320 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
65dbf343
AV
321
322 /* Now wait for cmd ready */
e0b19b83 323 at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE);
65dbf343
AV
324
325 cmd = host->cmd;
326 if (!cmd) return;
327
328 data = cmd->data;
329 if (!data) return;
330
be0192aa 331 if (cmd->data->blocks > 1) {
ed99c541
NF
332 pr_debug("multiple write : wait for BLKE...\n");
333 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
334 } else
335 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
336
65dbf343
AV
337 data->bytes_xfered = host->total_length;
338}
339
ed99c541
NF
340/*Handle after command sent ready*/
341static int at91_mci_handle_cmdrdy(struct at91mci_host *host)
342{
343 if (!host->cmd)
344 return 1;
345 else if (!host->cmd->data) {
346 if (host->flags & FL_SENT_STOP) {
347 /*After multi block write, we must wait for NOTBUSY*/
348 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
349 } else return 1;
350 } else if (host->cmd->data->flags & MMC_DATA_WRITE) {
351 /*After sendding multi-block-write command, start DMA transfer*/
352 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE);
353 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
354 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
355 }
356
357 /* command not completed, have to wait */
358 return 0;
359}
360
361
65dbf343
AV
362/*
363 * Enable the controller
364 */
e0b19b83 365static void at91_mci_enable(struct at91mci_host *host)
65dbf343 366{
ed99c541
NF
367 unsigned int mr;
368
e0b19b83 369 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
f3a8efa9 370 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
e0b19b83 371 at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
ed99c541
NF
372 mr = AT91_MCI_PDCMODE | 0x34a;
373
374 if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
375 mr |= AT91_MCI_RDPROOF | AT91_MCI_WRPROOF;
376
377 at91_mci_write(host, AT91_MCI_MR, mr);
99eeb8df
AV
378
379 /* use Slot A or B (only one at same time) */
380 at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b);
65dbf343
AV
381}
382
/*
 * Disable the controller
 */
static void at91_mci_disable(struct at91mci_host *host)
{
	/* disable the interface and issue a software reset in one write */
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
}
390
391/*
392 * Send a command
65dbf343 393 */
ed99c541 394static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
65dbf343
AV
395{
396 unsigned int cmdr, mr;
397 unsigned int block_length;
398 struct mmc_data *data = cmd->data;
399
400 unsigned int blocks;
401 unsigned int ier = 0;
402
403 host->cmd = cmd;
404
ed99c541 405 /* Needed for leaving busy state before CMD1 */
e0b19b83 406 if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
b44fb7a0 407 pr_debug("Clearing timeout\n");
e0b19b83
AV
408 at91_mci_write(host, AT91_MCI_ARGR, 0);
409 at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
410 while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
65dbf343 411 /* spin */
e0b19b83 412 pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
65dbf343
AV
413 }
414 }
ed99c541 415
65dbf343
AV
416 cmdr = cmd->opcode;
417
418 if (mmc_resp_type(cmd) == MMC_RSP_NONE)
419 cmdr |= AT91_MCI_RSPTYP_NONE;
420 else {
421 /* if a response is expected then allow maximum response latancy */
422 cmdr |= AT91_MCI_MAXLAT;
423 /* set 136 bit response for R2, 48 bit response otherwise */
424 if (mmc_resp_type(cmd) == MMC_RSP_R2)
425 cmdr |= AT91_MCI_RSPTYP_136;
426 else
427 cmdr |= AT91_MCI_RSPTYP_48;
428 }
429
430 if (data) {
1d4de9ed
MP
431
432 if ( data->blksz & 0x3 ) {
433 pr_debug("Unsupported block size\n");
434 cmd->error = -EINVAL;
435 mmc_request_done(host->mmc, host->request);
436 return;
437 }
438
a3fd4a1b 439 block_length = data->blksz;
65dbf343
AV
440 blocks = data->blocks;
441
442 /* always set data start - also set direction flag for read */
443 if (data->flags & MMC_DATA_READ)
444 cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
445 else if (data->flags & MMC_DATA_WRITE)
446 cmdr |= AT91_MCI_TRCMD_START;
447
448 if (data->flags & MMC_DATA_STREAM)
449 cmdr |= AT91_MCI_TRTYP_STREAM;
be0192aa 450 if (data->blocks > 1)
65dbf343
AV
451 cmdr |= AT91_MCI_TRTYP_MULTIPLE;
452 }
453 else {
454 block_length = 0;
455 blocks = 0;
456 }
457
b6cedb38 458 if (host->flags & FL_SENT_STOP)
65dbf343
AV
459 cmdr |= AT91_MCI_TRCMD_STOP;
460
461 if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
462 cmdr |= AT91_MCI_OPDCMD;
463
464 /*
465 * Set the arguments and send the command
466 */
f3a8efa9 467 pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n",
e0b19b83 468 cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));
65dbf343
AV
469
470 if (!data) {
93a3ddc2
AV
471 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS | ATMEL_PDC_RXTDIS);
472 at91_mci_write(host, ATMEL_PDC_RPR, 0);
473 at91_mci_write(host, ATMEL_PDC_RCR, 0);
474 at91_mci_write(host, ATMEL_PDC_RNPR, 0);
475 at91_mci_write(host, ATMEL_PDC_RNCR, 0);
476 at91_mci_write(host, ATMEL_PDC_TPR, 0);
477 at91_mci_write(host, ATMEL_PDC_TCR, 0);
478 at91_mci_write(host, ATMEL_PDC_TNPR, 0);
479 at91_mci_write(host, ATMEL_PDC_TNCR, 0);
ed99c541
NF
480 ier = AT91_MCI_CMDRDY;
481 } else {
482 /* zero block length and PDC mode */
483 mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff;
484 at91_mci_write(host, AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE);
e0b19b83 485
ed99c541
NF
486 /*
487 * Disable the PDC controller
488 */
489 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
65dbf343 490
ed99c541
NF
491 if (cmdr & AT91_MCI_TRCMD_START) {
492 data->bytes_xfered = 0;
493 host->transfer_index = 0;
494 host->in_use_index = 0;
495 if (cmdr & AT91_MCI_TRDIR) {
496 /*
497 * Handle a read
498 */
499 host->buffer = NULL;
500 host->total_length = 0;
501
502 at91_mci_pre_dma_read(host);
503 ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
504 }
505 else {
506 /*
507 * Handle a write
508 */
509 host->total_length = block_length * blocks;
510 host->buffer = dma_alloc_coherent(NULL,
511 host->total_length,
512 &host->physical_address, GFP_KERNEL);
513
514 at91_mci_sg_to_dma(host, data);
515
516 pr_debug("Transmitting %d bytes\n", host->total_length);
517
518 at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
519 at91_mci_write(host, ATMEL_PDC_TCR, host->total_length / 4);
520 ier = AT91_MCI_CMDRDY;
521 }
65dbf343
AV
522 }
523 }
524
525 /*
526 * Send the command and then enable the PDC - not the other way round as
527 * the data sheet says
528 */
529
e0b19b83
AV
530 at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
531 at91_mci_write(host, AT91_MCI_CMDR, cmdr);
65dbf343
AV
532
533 if (cmdr & AT91_MCI_TRCMD_START) {
534 if (cmdr & AT91_MCI_TRDIR)
93a3ddc2 535 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
65dbf343 536 }
65dbf343 537
ed99c541 538 /* Enable selected interrupts */
df05a303 539 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier);
65dbf343
AV
540}
541
542/*
543 * Process the next step in the request
544 */
e8d04d3d 545static void at91_mci_process_next(struct at91mci_host *host)
65dbf343
AV
546{
547 if (!(host->flags & FL_SENT_COMMAND)) {
548 host->flags |= FL_SENT_COMMAND;
ed99c541 549 at91_mci_send_command(host, host->request->cmd);
65dbf343
AV
550 }
551 else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
552 host->flags |= FL_SENT_STOP;
ed99c541 553 at91_mci_send_command(host, host->request->stop);
65dbf343
AV
554 }
555 else
556 mmc_request_done(host->mmc, host->request);
557}
558
559/*
560 * Handle a command that has been completed
561 */
e8d04d3d 562static void at91_mci_completed_command(struct at91mci_host *host)
65dbf343
AV
563{
564 struct mmc_command *cmd = host->cmd;
565 unsigned int status;
566
e0b19b83 567 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
65dbf343 568
e0b19b83
AV
569 cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
570 cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
571 cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
572 cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));
65dbf343
AV
573
574 if (host->buffer) {
575 dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address);
576 host->buffer = NULL;
577 }
578
e0b19b83 579 status = at91_mci_read(host, AT91_MCI_SR);
65dbf343 580
b44fb7a0 581 pr_debug("Status = %08X [%08X %08X %08X %08X]\n",
65dbf343
AV
582 status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
583
584 if (status & (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE |
585 AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE |
586 AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)) {
b6cedb38 587 if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) {
17b0429d 588 cmd->error = 0;
65dbf343
AV
589 }
590 else {
591 if (status & (AT91_MCI_RTOE | AT91_MCI_DTOE))
17b0429d 592 cmd->error = -ETIMEDOUT;
65dbf343 593 else if (status & (AT91_MCI_RCRCE | AT91_MCI_DCRCE))
17b0429d 594 cmd->error = -EILSEQ;
65dbf343 595 else
17b0429d 596 cmd->error = -EIO;
65dbf343 597
b44fb7a0 598 pr_debug("Error detected and set to %d (cmd = %d, retries = %d)\n",
65dbf343
AV
599 cmd->error, cmd->opcode, cmd->retries);
600 }
601 }
602 else
17b0429d 603 cmd->error = 0;
65dbf343 604
e8d04d3d 605 at91_mci_process_next(host);
65dbf343
AV
606}
607
608/*
609 * Handle an MMC request
610 */
611static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
612{
613 struct at91mci_host *host = mmc_priv(mmc);
614 host->request = mrq;
615 host->flags = 0;
616
e8d04d3d 617 at91_mci_process_next(host);
65dbf343
AV
618}
619
620/*
621 * Set the IOS
622 */
623static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
624{
625 int clkdiv;
626 struct at91mci_host *host = mmc_priv(mmc);
3dd3b039 627 unsigned long at91_master_clock = clk_get_rate(host->mci_clk);
65dbf343 628
b44fb7a0 629 host->bus_mode = ios->bus_mode;
65dbf343
AV
630
631 if (ios->clock == 0) {
632 /* Disable the MCI controller */
e0b19b83 633 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS);
65dbf343
AV
634 clkdiv = 0;
635 }
636 else {
637 /* Enable the MCI controller */
e0b19b83 638 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
65dbf343
AV
639
640 if ((at91_master_clock % (ios->clock * 2)) == 0)
641 clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
642 else
643 clkdiv = (at91_master_clock / ios->clock) / 2;
644
b44fb7a0 645 pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
65dbf343
AV
646 at91_master_clock / (2 * (clkdiv + 1)));
647 }
648 if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
b44fb7a0 649 pr_debug("MMC: Setting controller bus width to 4\n");
e0b19b83 650 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
65dbf343
AV
651 }
652 else {
b44fb7a0 653 pr_debug("MMC: Setting controller bus width to 1\n");
e0b19b83 654 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
65dbf343
AV
655 }
656
657 /* Set the clock divider */
e0b19b83 658 at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);
65dbf343
AV
659
660 /* maybe switch power to the card */
b44fb7a0 661 if (host->board->vcc_pin) {
65dbf343
AV
662 switch (ios->power_mode) {
663 case MMC_POWER_OFF:
99eeb8df 664 at91_set_gpio_value(host->board->vcc_pin, 0);
65dbf343
AV
665 break;
666 case MMC_POWER_UP:
667 case MMC_POWER_ON:
99eeb8df 668 at91_set_gpio_value(host->board->vcc_pin, 1);
65dbf343
AV
669 break;
670 }
671 }
672}
673
/*
 * Handle an interrupt
 *
 * Any error bit finishes the current command immediately; otherwise the
 * individual status bits drive the read/write state machine, and `completed`
 * decides whether at91_mci_completed_command() runs.
 */
static irqreturn_t at91_mci_irq(int irq, void *devid)
{
	struct at91mci_host *host = devid;
	int completed = 0;
	unsigned int int_status, int_mask;

	int_status = at91_mci_read(host, AT91_MCI_SR);
	int_mask = at91_mci_read(host, AT91_MCI_IMR);

	pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask,
		int_status & int_mask);

	/* consider only the sources that are currently unmasked */
	int_status = int_status & int_mask;

	if (int_status & AT91_MCI_ERRORS) {
		completed = 1;

		if (int_status & AT91_MCI_UNRE)
			pr_debug("MMC: Underrun error\n");
		if (int_status & AT91_MCI_OVRE)
			pr_debug("MMC: Overrun error\n");
		if (int_status & AT91_MCI_DTOE)
			pr_debug("MMC: Data timeout\n");
		if (int_status & AT91_MCI_DCRCE)
			pr_debug("MMC: CRC error in data\n");
		if (int_status & AT91_MCI_RTOE)
			pr_debug("MMC: Response timeout\n");
		if (int_status & AT91_MCI_RENDE)
			pr_debug("MMC: Response end bit error\n");
		if (int_status & AT91_MCI_RCRCE)
			pr_debug("MMC: Response CRC error\n");
		if (int_status & AT91_MCI_RDIRE)
			pr_debug("MMC: Response direction error\n");
		if (int_status & AT91_MCI_RINDE)
			pr_debug("MMC: Response index error\n");
	} else {
		/* Only continue processing if no errors */

		if (int_status & AT91_MCI_TXBUFE) {
			pr_debug("TX buffer empty\n");
			at91_mci_handle_transmitted(host);
		}

		if (int_status & AT91_MCI_ENDRX) {
			pr_debug("ENDRX\n");
			at91_mci_post_dma_read(host);
		}

		if (int_status & AT91_MCI_RXBUFF) {
			pr_debug("RX buffer full\n");
			/* read fully done: stop the PDC and mask the RX sources */
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
			at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_RXBUFF | AT91_MCI_ENDRX);
			completed = 1;
		}

		if (int_status & AT91_MCI_ENDTX)
			pr_debug("Transmit has ended\n");

		if (int_status & AT91_MCI_NOTBUSY) {
			pr_debug("Card is ready\n");
			completed = 1;
		}

		if (int_status & AT91_MCI_DTIP)
			pr_debug("Data transfer in progress\n");

		if (int_status & AT91_MCI_BLKE) {
			pr_debug("Block transfer has ended\n");
			completed = 1;
		}

		if (int_status & AT91_MCI_TXRDY)
			pr_debug("Ready to transmit\n");

		if (int_status & AT91_MCI_RXRDY)
			pr_debug("Ready to receive\n");

		if (int_status & AT91_MCI_CMDRDY) {
			pr_debug("Command ready\n");
			completed = at91_mci_handle_cmdrdy(host);
		}
	}

	if (completed) {
		pr_debug("Completed command\n");
		/* mask everything; the next command re-enables what it needs */
		at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
		at91_mci_completed_command(host);
	} else
		at91_mci_write(host, AT91_MCI_IDR, int_status);

	return IRQ_HANDLED;
}
769
7d12e780 770static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
65dbf343
AV
771{
772 struct at91mci_host *host = _host;
773 int present = !at91_get_gpio_value(irq);
774
775 /*
776 * we expect this irq on both insert and remove,
777 * and use a short delay to debounce.
778 */
779 if (present != host->present) {
780 host->present = present;
b44fb7a0 781 pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
65dbf343
AV
782 present ? "insert" : "remove");
783 if (!present) {
b44fb7a0 784 pr_debug("****** Resetting SD-card bus width ******\n");
99eeb8df 785 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
65dbf343
AV
786 }
787 mmc_detect_change(host->mmc, msecs_to_jiffies(100));
788 }
789 return IRQ_HANDLED;
790}
791
a26b498c 792static int at91_mci_get_ro(struct mmc_host *mmc)
65dbf343
AV
793{
794 int read_only = 0;
795 struct at91mci_host *host = mmc_priv(mmc);
796
797 if (host->board->wp_pin) {
798 read_only = at91_get_gpio_value(host->board->wp_pin);
799 printk(KERN_WARNING "%s: card is %s\n", mmc_hostname(mmc),
800 (read_only ? "read-only" : "read-write") );
801 }
802 else {
803 printk(KERN_WARNING "%s: host does not support reading read-only "
804 "switch. Assuming write-enable.\n", mmc_hostname(mmc));
805 }
806 return read_only;
807}
808
ab7aefd0 809static const struct mmc_host_ops at91_mci_ops = {
65dbf343
AV
810 .request = at91_mci_request,
811 .set_ios = at91_mci_set_ios,
812 .get_ro = at91_mci_get_ro,
813};
814
815/*
816 * Probe for the device
817 */
a26b498c 818static int __init at91_mci_probe(struct platform_device *pdev)
65dbf343
AV
819{
820 struct mmc_host *mmc;
821 struct at91mci_host *host;
17ea0595 822 struct resource *res;
65dbf343
AV
823 int ret;
824
b44fb7a0 825 pr_debug("Probe MCI devices\n");
65dbf343 826
17ea0595
AV
827 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
828 if (!res)
829 return -ENXIO;
830
831 if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
832 return -EBUSY;
833
65dbf343
AV
834 mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
835 if (!mmc) {
b44fb7a0 836 pr_debug("Failed to allocate mmc host\n");
17ea0595 837 release_mem_region(res->start, res->end - res->start + 1);
65dbf343
AV
838 return -ENOMEM;
839 }
840
841 mmc->ops = &at91_mci_ops;
842 mmc->f_min = 375000;
843 mmc->f_max = 25000000;
844 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
845
fe4a3c7a 846 mmc->max_blk_size = 4095;
55db890a 847 mmc->max_blk_count = mmc->max_req_size;
fe4a3c7a 848
65dbf343
AV
849 host = mmc_priv(mmc);
850 host->mmc = mmc;
851 host->buffer = NULL;
852 host->bus_mode = 0;
853 host->board = pdev->dev.platform_data;
854 if (host->board->wire4) {
ed99c541
NF
855 if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
856 mmc->caps |= MMC_CAP_4_BIT_DATA;
857 else
858 printk("AT91 MMC: 4 wire bus mode not supported"
859 " - using 1 wire\n");
65dbf343
AV
860 }
861
862 /*
863 * Get Clock
864 */
3dd3b039
AV
865 host->mci_clk = clk_get(&pdev->dev, "mci_clk");
866 if (IS_ERR(host->mci_clk)) {
65dbf343 867 printk(KERN_ERR "AT91 MMC: no clock defined.\n");
b44fb7a0 868 mmc_free_host(mmc);
17ea0595 869 release_mem_region(res->start, res->end - res->start + 1);
65dbf343
AV
870 return -ENODEV;
871 }
65dbf343 872
17ea0595
AV
873 /*
874 * Map I/O region
875 */
876 host->baseaddr = ioremap(res->start, res->end - res->start + 1);
877 if (!host->baseaddr) {
3dd3b039 878 clk_put(host->mci_clk);
17ea0595
AV
879 mmc_free_host(mmc);
880 release_mem_region(res->start, res->end - res->start + 1);
881 return -ENOMEM;
882 }
e0b19b83
AV
883
884 /*
885 * Reset hardware
886 */
3dd3b039 887 clk_enable(host->mci_clk); /* Enable the peripheral clock */
e0b19b83
AV
888 at91_mci_disable(host);
889 at91_mci_enable(host);
890
65dbf343
AV
891 /*
892 * Allocate the MCI interrupt
893 */
17ea0595
AV
894 host->irq = platform_get_irq(pdev, 0);
895 ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED, DRIVER_NAME, host);
65dbf343 896 if (ret) {
f3a8efa9 897 printk(KERN_ERR "AT91 MMC: Failed to request MCI interrupt\n");
3dd3b039
AV
898 clk_disable(host->mci_clk);
899 clk_put(host->mci_clk);
b44fb7a0 900 mmc_free_host(mmc);
17ea0595
AV
901 iounmap(host->baseaddr);
902 release_mem_region(res->start, res->end - res->start + 1);
65dbf343
AV
903 return ret;
904 }
905
906 platform_set_drvdata(pdev, mmc);
907
908 /*
909 * Add host to MMC layer
910 */
63b66438 911 if (host->board->det_pin) {
65dbf343 912 host->present = !at91_get_gpio_value(host->board->det_pin);
63b66438
MP
913 device_init_wakeup(&pdev->dev, 1);
914 }
65dbf343
AV
915 else
916 host->present = -1;
917
918 mmc_add_host(mmc);
919
920 /*
921 * monitor card insertion/removal if we can
922 */
923 if (host->board->det_pin) {
924 ret = request_irq(host->board->det_pin, at91_mmc_det_irq,
b44fb7a0 925 0, DRIVER_NAME, host);
65dbf343 926 if (ret)
f3a8efa9 927 printk(KERN_ERR "AT91 MMC: Couldn't allocate MMC detect irq\n");
65dbf343
AV
928 }
929
f3a8efa9 930 pr_debug("Added MCI driver\n");
65dbf343
AV
931
932 return 0;
933}
934
/*
 * Remove a device
 *
 * Tears down in reverse probe order: detect IRQ first, then the controller
 * and MMC core registration, clock, mapping, memory region, and finally the
 * host structure itself.
 */
static int __exit at91_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host;
	struct resource *res;

	if (!mmc)
		return -1;

	host = mmc_priv(mmc);

	if (host->board->det_pin) {
		device_init_wakeup(&pdev->dev, 0);
		free_irq(host->board->det_pin, host);
		/* stop any pending debounced rescan queued by the detect IRQ */
		cancel_delayed_work(&host->mmc->detect);
	}

	at91_mci_disable(host);
	mmc_remove_host(mmc);
	free_irq(host->irq, host);

	clk_disable(host->mci_clk);		/* Disable the peripheral clock */
	clk_put(host->mci_clk);

	iounmap(host->baseaddr);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, res->end - res->start + 1);

	mmc_free_host(mmc);
	platform_set_drvdata(pdev, NULL);
	pr_debug("MCI Removed\n");

	return 0;
}
972
973#ifdef CONFIG_PM
/* Suspend: arm the card-detect pin as a wakeup source, then suspend the MMC core. */
static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	/* NOTE(review): mmc is dereferenced here via mmc_priv() before the
	 * NULL check below — relies on drvdata always being set at probe */
	struct at91mci_host *host = mmc_priv(mmc);
	int ret = 0;

	if (host->board->det_pin && device_may_wakeup(&pdev->dev))
		enable_irq_wake(host->board->det_pin);

	if (mmc)
		ret = mmc_suspend_host(mmc, state);

	return ret;
}
988
/* Resume: disarm the card-detect wakeup source, then resume the MMC core. */
static int at91_mci_resume(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	/* NOTE(review): mmc is dereferenced here via mmc_priv() before the
	 * NULL check below — relies on drvdata always being set at probe */
	struct at91mci_host *host = mmc_priv(mmc);
	int ret = 0;

	if (host->board->det_pin && device_may_wakeup(&pdev->dev))
		disable_irq_wake(host->board->det_pin);

	if (mmc)
		ret = mmc_resume_host(mmc);

	return ret;
}
1003#else
1004#define at91_mci_suspend NULL
1005#define at91_mci_resume NULL
1006#endif
1007
/* Platform driver; .probe is supplied to platform_driver_probe() at init time */
static struct platform_driver at91_mci_driver = {
	.remove = __exit_p(at91_mci_remove),
	.suspend = at91_mci_suspend,
	.resume = at91_mci_resume,
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
	},
};
1017
static int __init at91_mci_init(void)
{
	/* platform_driver_probe() binds the __init-marked probe without
	 * keeping a .probe pointer alive past init */
	return platform_driver_probe(&at91_mci_driver, at91_mci_probe);
}

static void __exit at91_mci_exit(void)
{
	platform_driver_unregister(&at91_mci_driver);
}

module_init(at91_mci_init);
module_exit(at91_mci_exit);

MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
MODULE_AUTHOR("Nick Randell");
MODULE_LICENSE("GPL");