mmc: remove BYTEBLOCK capability
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / mmc / host / at91_mci.c
CommitLineData
65dbf343 1/*
70f10482 2 * linux/drivers/mmc/host/at91_mci.c - ATMEL AT91 MCI Driver
65dbf343
AV
3 *
4 * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
5 *
6 * Copyright (C) 2006 Malcolm Noyes
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13/*
99eeb8df 14 This is the AT91 MCI driver that has been tested with both MMC cards
65dbf343
AV
15 and SD-cards. Boards that support write protect are now supported.
16 The CCAT91SBC001 board does not support SD cards.
17
18 The three entry points are at91_mci_request, at91_mci_set_ios
19 and at91_mci_get_ro.
20
21 SET IOS
22 This configures the device to put it into the correct mode and clock speed
23 required.
24
25 MCI REQUEST
26 MCI request processes the commands sent in the mmc_request structure. This
27 can consist of a processing command and a stop command in the case of
28 multiple block transfers.
29
30 There are three main types of request, commands, reads and writes.
31
32 Commands are straightforward. The command is submitted to the controller and
33 the request function returns. When the controller generates an interrupt to indicate
34 the command is finished, the response to the command is read and the mmc_request_done
35 function called to end the request.
36
37 Reads and writes work in a similar manner to normal commands but involve the PDC (DMA)
38 controller to manage the transfers.
39
40 A read is done from the controller directly to the scatterlist passed in from the request.
99eeb8df
AV
41 Due to a bug in the AT91RM9200 controller, when a read is completed, all the words are byte
42 swapped in the scatterlist buffers. AT91SAM926x are not affected by this bug.
65dbf343
AV
43
44 The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY
45
46 A write is slightly different in that the bytes to write are read from the scatterlist
47 into a dma memory buffer (this is in case the source buffer should be read only). The
48 entire write buffer is then done from this single dma memory buffer.
49
50 The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY
51
52 GET RO
53 Gets the status of the write protect pin, if available.
54*/
55
65dbf343
AV
56#include <linux/module.h>
57#include <linux/moduleparam.h>
58#include <linux/init.h>
59#include <linux/ioport.h>
60#include <linux/platform_device.h>
61#include <linux/interrupt.h>
62#include <linux/blkdev.h>
63#include <linux/delay.h>
64#include <linux/err.h>
65#include <linux/dma-mapping.h>
66#include <linux/clk.h>
93a3ddc2 67#include <linux/atmel_pdc.h>
65dbf343
AV
68
69#include <linux/mmc/host.h>
65dbf343
AV
70
71#include <asm/io.h>
72#include <asm/irq.h>
73#include <asm/mach/mmc.h>
74#include <asm/arch/board.h>
99eeb8df 75#include <asm/arch/cpu.h>
65dbf343 76#include <asm/arch/gpio.h>
55d8baee 77#include <asm/arch/at91_mci.h>
65dbf343
AV
78
79#define DRIVER_NAME "at91_mci"
80
df05a303
AV
81#define FL_SENT_COMMAND (1 << 0)
82#define FL_SENT_STOP (1 << 1)
65dbf343 83
df05a303
AV
84#define AT91_MCI_ERRORS (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE \
85 | AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE \
37b758e8 86 | AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)
65dbf343 87
e0b19b83
AV
88#define at91_mci_read(host, reg) __raw_readl((host)->baseaddr + (reg))
89#define at91_mci_write(host, reg, val) __raw_writel((val), (host)->baseaddr + (reg))
65dbf343 90
65dbf343
AV
91
92/*
93 * Low level type for this driver
94 */
95struct at91mci_host
96{
97 struct mmc_host *mmc;
98 struct mmc_command *cmd;
99 struct mmc_request *request;
100
e0b19b83 101 void __iomem *baseaddr;
17ea0595 102 int irq;
e0b19b83 103
65dbf343
AV
104 struct at91_mmc_data *board;
105 int present;
106
3dd3b039
AV
107 struct clk *mci_clk;
108
65dbf343
AV
109 /*
110 * Flag indicating when the command has been sent. This is used to
111 * work out whether or not to send the stop
112 */
113 unsigned int flags;
114 /* flag for current bus settings */
115 u32 bus_mode;
116
117 /* DMA buffer used for transmitting */
118 unsigned int* buffer;
119 dma_addr_t physical_address;
120 unsigned int total_length;
121
122 /* Latest in the scatterlist that has been enabled for transfer, but not freed */
123 int in_use_index;
124
125 /* Latest in the scatterlist that has been enabled for transfer */
126 int transfer_index;
127};
128
129/*
130 * Copy from sg to a dma block - used for transfers
131 */
e8d04d3d 132static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
65dbf343
AV
133{
134 unsigned int len, i, size;
135 unsigned *dmabuf = host->buffer;
136
137 size = host->total_length;
138 len = data->sg_len;
139
140 /*
141 * Just loop through all entries. Size might not
142 * be the entire list though so make sure that
143 * we do not transfer too much.
144 */
145 for (i = 0; i < len; i++) {
146 struct scatterlist *sg;
147 int amount;
65dbf343
AV
148 unsigned int *sgbuffer;
149
150 sg = &data->sg[i];
151
152 sgbuffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
153 amount = min(size, sg->length);
154 size -= amount;
65dbf343 155
99eeb8df
AV
156 if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */
157 int index;
158
159 for (index = 0; index < (amount / 4); index++)
160 *dmabuf++ = swab32(sgbuffer[index]);
161 }
162 else
163 memcpy(dmabuf, sgbuffer, amount);
65dbf343
AV
164
165 kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);
166
167 if (size == 0)
168 break;
169 }
170
171 /*
172 * Check that we didn't get a request to transfer
173 * more data than can fit into the SG list.
174 */
175 BUG_ON(size != 0);
176}
177
178/*
179 * Prepare a dma read
180 */
e8d04d3d 181static void at91_mci_pre_dma_read(struct at91mci_host *host)
65dbf343
AV
182{
183 int i;
184 struct scatterlist *sg;
185 struct mmc_command *cmd;
186 struct mmc_data *data;
187
b44fb7a0 188 pr_debug("pre dma read\n");
65dbf343
AV
189
190 cmd = host->cmd;
191 if (!cmd) {
b44fb7a0 192 pr_debug("no command\n");
65dbf343
AV
193 return;
194 }
195
196 data = cmd->data;
197 if (!data) {
b44fb7a0 198 pr_debug("no data\n");
65dbf343
AV
199 return;
200 }
201
202 for (i = 0; i < 2; i++) {
203 /* nothing left to transfer */
204 if (host->transfer_index >= data->sg_len) {
b44fb7a0 205 pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
65dbf343
AV
206 break;
207 }
208
209 /* Check to see if this needs filling */
210 if (i == 0) {
93a3ddc2 211 if (at91_mci_read(host, ATMEL_PDC_RCR) != 0) {
b44fb7a0 212 pr_debug("Transfer active in current\n");
65dbf343
AV
213 continue;
214 }
215 }
216 else {
93a3ddc2 217 if (at91_mci_read(host, ATMEL_PDC_RNCR) != 0) {
b44fb7a0 218 pr_debug("Transfer active in next\n");
65dbf343
AV
219 continue;
220 }
221 }
222
223 /* Setup the next transfer */
b44fb7a0 224 pr_debug("Using transfer index %d\n", host->transfer_index);
65dbf343
AV
225
226 sg = &data->sg[host->transfer_index++];
b44fb7a0 227 pr_debug("sg = %p\n", sg);
65dbf343
AV
228
229 sg->dma_address = dma_map_page(NULL, sg->page, sg->offset, sg->length, DMA_FROM_DEVICE);
230
b44fb7a0 231 pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);
65dbf343
AV
232
233 if (i == 0) {
93a3ddc2
AV
234 at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address);
235 at91_mci_write(host, ATMEL_PDC_RCR, sg->length / 4);
65dbf343
AV
236 }
237 else {
93a3ddc2
AV
238 at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address);
239 at91_mci_write(host, ATMEL_PDC_RNCR, sg->length / 4);
65dbf343
AV
240 }
241 }
242
b44fb7a0 243 pr_debug("pre dma read done\n");
65dbf343
AV
244}
245
246/*
247 * Handle after a dma read
248 */
e8d04d3d 249static void at91_mci_post_dma_read(struct at91mci_host *host)
65dbf343
AV
250{
251 struct mmc_command *cmd;
252 struct mmc_data *data;
253
b44fb7a0 254 pr_debug("post dma read\n");
65dbf343
AV
255
256 cmd = host->cmd;
257 if (!cmd) {
b44fb7a0 258 pr_debug("no command\n");
65dbf343
AV
259 return;
260 }
261
262 data = cmd->data;
263 if (!data) {
b44fb7a0 264 pr_debug("no data\n");
65dbf343
AV
265 return;
266 }
267
268 while (host->in_use_index < host->transfer_index) {
65dbf343
AV
269 struct scatterlist *sg;
270
b44fb7a0 271 pr_debug("finishing index %d\n", host->in_use_index);
65dbf343
AV
272
273 sg = &data->sg[host->in_use_index++];
274
b44fb7a0 275 pr_debug("Unmapping page %08X\n", sg->dma_address);
65dbf343
AV
276
277 dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);
278
65dbf343
AV
279 data->bytes_xfered += sg->length;
280
99eeb8df 281 if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */
ed99c541 282 unsigned int *buffer;
99eeb8df 283 int index;
65dbf343 284
ed99c541
NF
285 /* Swap the contents of the buffer */
286 buffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
287 pr_debug("buffer = %p, length = %d\n", buffer, sg->length);
288
99eeb8df
AV
289 for (index = 0; index < (sg->length / 4); index++)
290 buffer[index] = swab32(buffer[index]);
ed99c541
NF
291
292 kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
65dbf343 293 }
99eeb8df 294
65dbf343
AV
295 flush_dcache_page(sg->page);
296 }
297
298 /* Is there another transfer to trigger? */
299 if (host->transfer_index < data->sg_len)
e8d04d3d 300 at91_mci_pre_dma_read(host);
65dbf343 301 else {
ed99c541 302 at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_ENDRX);
e0b19b83 303 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
65dbf343
AV
304 }
305
b44fb7a0 306 pr_debug("post dma read done\n");
65dbf343
AV
307}
308
309/*
310 * Handle transmitted data
311 */
312static void at91_mci_handle_transmitted(struct at91mci_host *host)
313{
314 struct mmc_command *cmd;
315 struct mmc_data *data;
316
b44fb7a0 317 pr_debug("Handling the transmit\n");
65dbf343
AV
318
319 /* Disable the transfer */
93a3ddc2 320 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
65dbf343
AV
321
322 /* Now wait for cmd ready */
e0b19b83 323 at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE);
65dbf343
AV
324
325 cmd = host->cmd;
326 if (!cmd) return;
327
328 data = cmd->data;
329 if (!data) return;
330
ed99c541
NF
331 if (cmd->data->flags & MMC_DATA_MULTI) {
332 pr_debug("multiple write : wait for BLKE...\n");
333 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
334 } else
335 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
336
65dbf343
AV
337 data->bytes_xfered = host->total_length;
338}
339
ed99c541
NF
340/*Handle after command sent ready*/
341static int at91_mci_handle_cmdrdy(struct at91mci_host *host)
342{
343 if (!host->cmd)
344 return 1;
345 else if (!host->cmd->data) {
346 if (host->flags & FL_SENT_STOP) {
347 /*After multi block write, we must wait for NOTBUSY*/
348 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
349 } else return 1;
350 } else if (host->cmd->data->flags & MMC_DATA_WRITE) {
351 /*After sendding multi-block-write command, start DMA transfer*/
352 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE);
353 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
354 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
355 }
356
357 /* command not completed, have to wait */
358 return 0;
359}
360
361
65dbf343
AV
362/*
363 * Enable the controller
364 */
e0b19b83 365static void at91_mci_enable(struct at91mci_host *host)
65dbf343 366{
ed99c541
NF
367 unsigned int mr;
368
e0b19b83 369 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
f3a8efa9 370 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
e0b19b83 371 at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
ed99c541
NF
372 mr = AT91_MCI_PDCMODE | 0x34a;
373
374 if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
375 mr |= AT91_MCI_RDPROOF | AT91_MCI_WRPROOF;
376
377 at91_mci_write(host, AT91_MCI_MR, mr);
99eeb8df
AV
378
379 /* use Slot A or B (only one at same time) */
380 at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b);
65dbf343
AV
381}
382
383/*
384 * Disable the controller
385 */
e0b19b83 386static void at91_mci_disable(struct at91mci_host *host)
65dbf343 387{
e0b19b83 388 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
65dbf343
AV
389}
390
391/*
392 * Send a command
65dbf343 393 */
ed99c541 394static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
65dbf343
AV
395{
396 unsigned int cmdr, mr;
397 unsigned int block_length;
398 struct mmc_data *data = cmd->data;
399
400 unsigned int blocks;
401 unsigned int ier = 0;
402
403 host->cmd = cmd;
404
ed99c541 405 /* Needed for leaving busy state before CMD1 */
e0b19b83 406 if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
b44fb7a0 407 pr_debug("Clearing timeout\n");
e0b19b83
AV
408 at91_mci_write(host, AT91_MCI_ARGR, 0);
409 at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
410 while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
65dbf343 411 /* spin */
e0b19b83 412 pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
65dbf343
AV
413 }
414 }
ed99c541 415
65dbf343
AV
416 cmdr = cmd->opcode;
417
418 if (mmc_resp_type(cmd) == MMC_RSP_NONE)
419 cmdr |= AT91_MCI_RSPTYP_NONE;
420 else {
421 /* if a response is expected then allow maximum response latancy */
422 cmdr |= AT91_MCI_MAXLAT;
423 /* set 136 bit response for R2, 48 bit response otherwise */
424 if (mmc_resp_type(cmd) == MMC_RSP_R2)
425 cmdr |= AT91_MCI_RSPTYP_136;
426 else
427 cmdr |= AT91_MCI_RSPTYP_48;
428 }
429
430 if (data) {
a3fd4a1b 431 block_length = data->blksz;
65dbf343
AV
432 blocks = data->blocks;
433
434 /* always set data start - also set direction flag for read */
435 if (data->flags & MMC_DATA_READ)
436 cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
437 else if (data->flags & MMC_DATA_WRITE)
438 cmdr |= AT91_MCI_TRCMD_START;
439
440 if (data->flags & MMC_DATA_STREAM)
441 cmdr |= AT91_MCI_TRTYP_STREAM;
442 if (data->flags & MMC_DATA_MULTI)
443 cmdr |= AT91_MCI_TRTYP_MULTIPLE;
444 }
445 else {
446 block_length = 0;
447 blocks = 0;
448 }
449
b6cedb38 450 if (host->flags & FL_SENT_STOP)
65dbf343
AV
451 cmdr |= AT91_MCI_TRCMD_STOP;
452
453 if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
454 cmdr |= AT91_MCI_OPDCMD;
455
456 /*
457 * Set the arguments and send the command
458 */
f3a8efa9 459 pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n",
e0b19b83 460 cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));
65dbf343
AV
461
462 if (!data) {
93a3ddc2
AV
463 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS | ATMEL_PDC_RXTDIS);
464 at91_mci_write(host, ATMEL_PDC_RPR, 0);
465 at91_mci_write(host, ATMEL_PDC_RCR, 0);
466 at91_mci_write(host, ATMEL_PDC_RNPR, 0);
467 at91_mci_write(host, ATMEL_PDC_RNCR, 0);
468 at91_mci_write(host, ATMEL_PDC_TPR, 0);
469 at91_mci_write(host, ATMEL_PDC_TCR, 0);
470 at91_mci_write(host, ATMEL_PDC_TNPR, 0);
471 at91_mci_write(host, ATMEL_PDC_TNCR, 0);
ed99c541
NF
472 ier = AT91_MCI_CMDRDY;
473 } else {
474 /* zero block length and PDC mode */
475 mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff;
476 at91_mci_write(host, AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE);
e0b19b83 477
ed99c541
NF
478 /*
479 * Disable the PDC controller
480 */
481 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
65dbf343 482
ed99c541
NF
483 if (cmdr & AT91_MCI_TRCMD_START) {
484 data->bytes_xfered = 0;
485 host->transfer_index = 0;
486 host->in_use_index = 0;
487 if (cmdr & AT91_MCI_TRDIR) {
488 /*
489 * Handle a read
490 */
491 host->buffer = NULL;
492 host->total_length = 0;
493
494 at91_mci_pre_dma_read(host);
495 ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
496 }
497 else {
498 /*
499 * Handle a write
500 */
501 host->total_length = block_length * blocks;
502 host->buffer = dma_alloc_coherent(NULL,
503 host->total_length,
504 &host->physical_address, GFP_KERNEL);
505
506 at91_mci_sg_to_dma(host, data);
507
508 pr_debug("Transmitting %d bytes\n", host->total_length);
509
510 at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
511 at91_mci_write(host, ATMEL_PDC_TCR, host->total_length / 4);
512 ier = AT91_MCI_CMDRDY;
513 }
65dbf343
AV
514 }
515 }
516
517 /*
518 * Send the command and then enable the PDC - not the other way round as
519 * the data sheet says
520 */
521
e0b19b83
AV
522 at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
523 at91_mci_write(host, AT91_MCI_CMDR, cmdr);
65dbf343
AV
524
525 if (cmdr & AT91_MCI_TRCMD_START) {
526 if (cmdr & AT91_MCI_TRDIR)
93a3ddc2 527 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
65dbf343 528 }
65dbf343 529
ed99c541 530 /* Enable selected interrupts */
df05a303 531 at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier);
65dbf343
AV
532}
533
534/*
535 * Process the next step in the request
536 */
e8d04d3d 537static void at91_mci_process_next(struct at91mci_host *host)
65dbf343
AV
538{
539 if (!(host->flags & FL_SENT_COMMAND)) {
540 host->flags |= FL_SENT_COMMAND;
ed99c541 541 at91_mci_send_command(host, host->request->cmd);
65dbf343
AV
542 }
543 else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
544 host->flags |= FL_SENT_STOP;
ed99c541 545 at91_mci_send_command(host, host->request->stop);
65dbf343
AV
546 }
547 else
548 mmc_request_done(host->mmc, host->request);
549}
550
551/*
552 * Handle a command that has been completed
553 */
e8d04d3d 554static void at91_mci_completed_command(struct at91mci_host *host)
65dbf343
AV
555{
556 struct mmc_command *cmd = host->cmd;
557 unsigned int status;
558
e0b19b83 559 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
65dbf343 560
e0b19b83
AV
561 cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
562 cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
563 cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
564 cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));
65dbf343
AV
565
566 if (host->buffer) {
567 dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address);
568 host->buffer = NULL;
569 }
570
e0b19b83 571 status = at91_mci_read(host, AT91_MCI_SR);
65dbf343 572
b44fb7a0 573 pr_debug("Status = %08X [%08X %08X %08X %08X]\n",
65dbf343
AV
574 status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
575
576 if (status & (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE |
577 AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE |
578 AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)) {
b6cedb38 579 if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) {
17b0429d 580 cmd->error = 0;
65dbf343
AV
581 }
582 else {
583 if (status & (AT91_MCI_RTOE | AT91_MCI_DTOE))
17b0429d 584 cmd->error = -ETIMEDOUT;
65dbf343 585 else if (status & (AT91_MCI_RCRCE | AT91_MCI_DCRCE))
17b0429d 586 cmd->error = -EILSEQ;
65dbf343 587 else
17b0429d 588 cmd->error = -EIO;
65dbf343 589
b44fb7a0 590 pr_debug("Error detected and set to %d (cmd = %d, retries = %d)\n",
65dbf343
AV
591 cmd->error, cmd->opcode, cmd->retries);
592 }
593 }
594 else
17b0429d 595 cmd->error = 0;
65dbf343 596
e8d04d3d 597 at91_mci_process_next(host);
65dbf343
AV
598}
599
600/*
601 * Handle an MMC request
602 */
603static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
604{
605 struct at91mci_host *host = mmc_priv(mmc);
606 host->request = mrq;
607 host->flags = 0;
608
e8d04d3d 609 at91_mci_process_next(host);
65dbf343
AV
610}
611
612/*
613 * Set the IOS
614 */
615static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
616{
617 int clkdiv;
618 struct at91mci_host *host = mmc_priv(mmc);
3dd3b039 619 unsigned long at91_master_clock = clk_get_rate(host->mci_clk);
65dbf343 620
b44fb7a0 621 host->bus_mode = ios->bus_mode;
65dbf343
AV
622
623 if (ios->clock == 0) {
624 /* Disable the MCI controller */
e0b19b83 625 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS);
65dbf343
AV
626 clkdiv = 0;
627 }
628 else {
629 /* Enable the MCI controller */
e0b19b83 630 at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
65dbf343
AV
631
632 if ((at91_master_clock % (ios->clock * 2)) == 0)
633 clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
634 else
635 clkdiv = (at91_master_clock / ios->clock) / 2;
636
b44fb7a0 637 pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
65dbf343
AV
638 at91_master_clock / (2 * (clkdiv + 1)));
639 }
640 if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
b44fb7a0 641 pr_debug("MMC: Setting controller bus width to 4\n");
e0b19b83 642 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
65dbf343
AV
643 }
644 else {
b44fb7a0 645 pr_debug("MMC: Setting controller bus width to 1\n");
e0b19b83 646 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
65dbf343
AV
647 }
648
649 /* Set the clock divider */
e0b19b83 650 at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);
65dbf343
AV
651
652 /* maybe switch power to the card */
b44fb7a0 653 if (host->board->vcc_pin) {
65dbf343
AV
654 switch (ios->power_mode) {
655 case MMC_POWER_OFF:
99eeb8df 656 at91_set_gpio_value(host->board->vcc_pin, 0);
65dbf343
AV
657 break;
658 case MMC_POWER_UP:
659 case MMC_POWER_ON:
99eeb8df 660 at91_set_gpio_value(host->board->vcc_pin, 1);
65dbf343
AV
661 break;
662 }
663 }
664}
665
666/*
667 * Handle an interrupt
668 */
7d12e780 669static irqreturn_t at91_mci_irq(int irq, void *devid)
65dbf343
AV
670{
671 struct at91mci_host *host = devid;
672 int completed = 0;
df05a303 673 unsigned int int_status, int_mask;
65dbf343 674
e0b19b83 675 int_status = at91_mci_read(host, AT91_MCI_SR);
df05a303 676 int_mask = at91_mci_read(host, AT91_MCI_IMR);
37b758e8 677
f3a8efa9 678 pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask,
df05a303 679 int_status & int_mask);
37b758e8 680
df05a303
AV
681 int_status = int_status & int_mask;
682
683 if (int_status & AT91_MCI_ERRORS) {
65dbf343 684 completed = 1;
37b758e8 685
df05a303
AV
686 if (int_status & AT91_MCI_UNRE)
687 pr_debug("MMC: Underrun error\n");
688 if (int_status & AT91_MCI_OVRE)
689 pr_debug("MMC: Overrun error\n");
690 if (int_status & AT91_MCI_DTOE)
691 pr_debug("MMC: Data timeout\n");
692 if (int_status & AT91_MCI_DCRCE)
693 pr_debug("MMC: CRC error in data\n");
694 if (int_status & AT91_MCI_RTOE)
695 pr_debug("MMC: Response timeout\n");
696 if (int_status & AT91_MCI_RENDE)
697 pr_debug("MMC: Response end bit error\n");
698 if (int_status & AT91_MCI_RCRCE)
699 pr_debug("MMC: Response CRC error\n");
700 if (int_status & AT91_MCI_RDIRE)
701 pr_debug("MMC: Response direction error\n");
702 if (int_status & AT91_MCI_RINDE)
703 pr_debug("MMC: Response index error\n");
704 } else {
705 /* Only continue processing if no errors */
65dbf343 706
65dbf343 707 if (int_status & AT91_MCI_TXBUFE) {
b44fb7a0 708 pr_debug("TX buffer empty\n");
65dbf343
AV
709 at91_mci_handle_transmitted(host);
710 }
711
ed99c541
NF
712 if (int_status & AT91_MCI_ENDRX) {
713 pr_debug("ENDRX\n");
714 at91_mci_post_dma_read(host);
715 }
716
65dbf343 717 if (int_status & AT91_MCI_RXBUFF) {
b44fb7a0 718 pr_debug("RX buffer full\n");
ed99c541
NF
719 at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
720 at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_RXBUFF | AT91_MCI_ENDRX);
721 completed = 1;
65dbf343
AV
722 }
723
df05a303 724 if (int_status & AT91_MCI_ENDTX)
b44fb7a0 725 pr_debug("Transmit has ended\n");
65dbf343 726
65dbf343 727 if (int_status & AT91_MCI_NOTBUSY) {
b44fb7a0 728 pr_debug("Card is ready\n");
ed99c541 729 completed = 1;
65dbf343
AV
730 }
731
df05a303 732 if (int_status & AT91_MCI_DTIP)
b44fb7a0 733 pr_debug("Data transfer in progress\n");
65dbf343 734
ed99c541 735 if (int_status & AT91_MCI_BLKE) {
b44fb7a0 736 pr_debug("Block transfer has ended\n");
ed99c541
NF
737 completed = 1;
738 }
65dbf343 739
df05a303 740 if (int_status & AT91_MCI_TXRDY)
b44fb7a0 741 pr_debug("Ready to transmit\n");
65dbf343 742
df05a303 743 if (int_status & AT91_MCI_RXRDY)
b44fb7a0 744 pr_debug("Ready to receive\n");
65dbf343
AV
745
746 if (int_status & AT91_MCI_CMDRDY) {
b44fb7a0 747 pr_debug("Command ready\n");
ed99c541 748 completed = at91_mci_handle_cmdrdy(host);
65dbf343
AV
749 }
750 }
65dbf343
AV
751
752 if (completed) {
b44fb7a0 753 pr_debug("Completed command\n");
e0b19b83 754 at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
e8d04d3d 755 at91_mci_completed_command(host);
df05a303
AV
756 } else
757 at91_mci_write(host, AT91_MCI_IDR, int_status);
65dbf343
AV
758
759 return IRQ_HANDLED;
760}
761
7d12e780 762static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
65dbf343
AV
763{
764 struct at91mci_host *host = _host;
765 int present = !at91_get_gpio_value(irq);
766
767 /*
768 * we expect this irq on both insert and remove,
769 * and use a short delay to debounce.
770 */
771 if (present != host->present) {
772 host->present = present;
b44fb7a0 773 pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
65dbf343
AV
774 present ? "insert" : "remove");
775 if (!present) {
b44fb7a0 776 pr_debug("****** Resetting SD-card bus width ******\n");
99eeb8df 777 at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
65dbf343
AV
778 }
779 mmc_detect_change(host->mmc, msecs_to_jiffies(100));
780 }
781 return IRQ_HANDLED;
782}
783
a26b498c 784static int at91_mci_get_ro(struct mmc_host *mmc)
65dbf343
AV
785{
786 int read_only = 0;
787 struct at91mci_host *host = mmc_priv(mmc);
788
789 if (host->board->wp_pin) {
790 read_only = at91_get_gpio_value(host->board->wp_pin);
791 printk(KERN_WARNING "%s: card is %s\n", mmc_hostname(mmc),
792 (read_only ? "read-only" : "read-write") );
793 }
794 else {
795 printk(KERN_WARNING "%s: host does not support reading read-only "
796 "switch. Assuming write-enable.\n", mmc_hostname(mmc));
797 }
798 return read_only;
799}
800
ab7aefd0 801static const struct mmc_host_ops at91_mci_ops = {
65dbf343
AV
802 .request = at91_mci_request,
803 .set_ios = at91_mci_set_ios,
804 .get_ro = at91_mci_get_ro,
805};
806
807/*
808 * Probe for the device
809 */
a26b498c 810static int __init at91_mci_probe(struct platform_device *pdev)
65dbf343
AV
811{
812 struct mmc_host *mmc;
813 struct at91mci_host *host;
17ea0595 814 struct resource *res;
65dbf343
AV
815 int ret;
816
b44fb7a0 817 pr_debug("Probe MCI devices\n");
65dbf343 818
17ea0595
AV
819 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
820 if (!res)
821 return -ENXIO;
822
823 if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
824 return -EBUSY;
825
65dbf343
AV
826 mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
827 if (!mmc) {
b44fb7a0 828 pr_debug("Failed to allocate mmc host\n");
17ea0595 829 release_mem_region(res->start, res->end - res->start + 1);
65dbf343
AV
830 return -ENOMEM;
831 }
832
833 mmc->ops = &at91_mci_ops;
834 mmc->f_min = 375000;
835 mmc->f_max = 25000000;
836 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
837
fe4a3c7a 838 mmc->max_blk_size = 4095;
55db890a 839 mmc->max_blk_count = mmc->max_req_size;
fe4a3c7a 840
65dbf343
AV
841 host = mmc_priv(mmc);
842 host->mmc = mmc;
843 host->buffer = NULL;
844 host->bus_mode = 0;
845 host->board = pdev->dev.platform_data;
846 if (host->board->wire4) {
ed99c541
NF
847 if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
848 mmc->caps |= MMC_CAP_4_BIT_DATA;
849 else
850 printk("AT91 MMC: 4 wire bus mode not supported"
851 " - using 1 wire\n");
65dbf343
AV
852 }
853
854 /*
855 * Get Clock
856 */
3dd3b039
AV
857 host->mci_clk = clk_get(&pdev->dev, "mci_clk");
858 if (IS_ERR(host->mci_clk)) {
65dbf343 859 printk(KERN_ERR "AT91 MMC: no clock defined.\n");
b44fb7a0 860 mmc_free_host(mmc);
17ea0595 861 release_mem_region(res->start, res->end - res->start + 1);
65dbf343
AV
862 return -ENODEV;
863 }
65dbf343 864
17ea0595
AV
865 /*
866 * Map I/O region
867 */
868 host->baseaddr = ioremap(res->start, res->end - res->start + 1);
869 if (!host->baseaddr) {
3dd3b039 870 clk_put(host->mci_clk);
17ea0595
AV
871 mmc_free_host(mmc);
872 release_mem_region(res->start, res->end - res->start + 1);
873 return -ENOMEM;
874 }
e0b19b83
AV
875
876 /*
877 * Reset hardware
878 */
3dd3b039 879 clk_enable(host->mci_clk); /* Enable the peripheral clock */
e0b19b83
AV
880 at91_mci_disable(host);
881 at91_mci_enable(host);
882
65dbf343
AV
883 /*
884 * Allocate the MCI interrupt
885 */
17ea0595
AV
886 host->irq = platform_get_irq(pdev, 0);
887 ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED, DRIVER_NAME, host);
65dbf343 888 if (ret) {
f3a8efa9 889 printk(KERN_ERR "AT91 MMC: Failed to request MCI interrupt\n");
3dd3b039
AV
890 clk_disable(host->mci_clk);
891 clk_put(host->mci_clk);
b44fb7a0 892 mmc_free_host(mmc);
17ea0595
AV
893 iounmap(host->baseaddr);
894 release_mem_region(res->start, res->end - res->start + 1);
65dbf343
AV
895 return ret;
896 }
897
898 platform_set_drvdata(pdev, mmc);
899
900 /*
901 * Add host to MMC layer
902 */
63b66438 903 if (host->board->det_pin) {
65dbf343 904 host->present = !at91_get_gpio_value(host->board->det_pin);
63b66438
MP
905 device_init_wakeup(&pdev->dev, 1);
906 }
65dbf343
AV
907 else
908 host->present = -1;
909
910 mmc_add_host(mmc);
911
912 /*
913 * monitor card insertion/removal if we can
914 */
915 if (host->board->det_pin) {
916 ret = request_irq(host->board->det_pin, at91_mmc_det_irq,
b44fb7a0 917 0, DRIVER_NAME, host);
65dbf343 918 if (ret)
f3a8efa9 919 printk(KERN_ERR "AT91 MMC: Couldn't allocate MMC detect irq\n");
65dbf343
AV
920 }
921
f3a8efa9 922 pr_debug("Added MCI driver\n");
65dbf343
AV
923
924 return 0;
925}
926
927/*
928 * Remove a device
929 */
a26b498c 930static int __exit at91_mci_remove(struct platform_device *pdev)
65dbf343
AV
931{
932 struct mmc_host *mmc = platform_get_drvdata(pdev);
933 struct at91mci_host *host;
17ea0595 934 struct resource *res;
65dbf343
AV
935
936 if (!mmc)
937 return -1;
938
939 host = mmc_priv(mmc);
940
e0cda54e 941 if (host->board->det_pin) {
63b66438 942 device_init_wakeup(&pdev->dev, 0);
65dbf343
AV
943 free_irq(host->board->det_pin, host);
944 cancel_delayed_work(&host->mmc->detect);
945 }
946
e0b19b83 947 at91_mci_disable(host);
17ea0595
AV
948 mmc_remove_host(mmc);
949 free_irq(host->irq, host);
65dbf343 950
3dd3b039
AV
951 clk_disable(host->mci_clk); /* Disable the peripheral clock */
952 clk_put(host->mci_clk);
65dbf343 953
17ea0595
AV
954 iounmap(host->baseaddr);
955 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
956 release_mem_region(res->start, res->end - res->start + 1);
65dbf343 957
17ea0595
AV
958 mmc_free_host(mmc);
959 platform_set_drvdata(pdev, NULL);
b44fb7a0 960 pr_debug("MCI Removed\n");
65dbf343
AV
961
962 return 0;
963}
964
965#ifdef CONFIG_PM
966static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
967{
968 struct mmc_host *mmc = platform_get_drvdata(pdev);
63b66438 969 struct at91mci_host *host = mmc_priv(mmc);
65dbf343
AV
970 int ret = 0;
971
e0cda54e 972 if (host->board->det_pin && device_may_wakeup(&pdev->dev))
63b66438
MP
973 enable_irq_wake(host->board->det_pin);
974
65dbf343
AV
975 if (mmc)
976 ret = mmc_suspend_host(mmc, state);
977
978 return ret;
979}
980
981static int at91_mci_resume(struct platform_device *pdev)
982{
983 struct mmc_host *mmc = platform_get_drvdata(pdev);
63b66438 984 struct at91mci_host *host = mmc_priv(mmc);
65dbf343
AV
985 int ret = 0;
986
e0cda54e 987 if (host->board->det_pin && device_may_wakeup(&pdev->dev))
63b66438
MP
988 disable_irq_wake(host->board->det_pin);
989
65dbf343
AV
990 if (mmc)
991 ret = mmc_resume_host(mmc);
992
993 return ret;
994}
995#else
996#define at91_mci_suspend NULL
997#define at91_mci_resume NULL
998#endif
999
1000static struct platform_driver at91_mci_driver = {
a26b498c 1001 .remove = __exit_p(at91_mci_remove),
65dbf343
AV
1002 .suspend = at91_mci_suspend,
1003 .resume = at91_mci_resume,
1004 .driver = {
1005 .name = DRIVER_NAME,
1006 .owner = THIS_MODULE,
1007 },
1008};
1009
1010static int __init at91_mci_init(void)
1011{
a26b498c 1012 return platform_driver_probe(&at91_mci_driver, at91_mci_probe);
65dbf343
AV
1013}
1014
1015static void __exit at91_mci_exit(void)
1016{
1017 platform_driver_unregister(&at91_mci_driver);
1018}
1019
1020module_init(at91_mci_init);
1021module_exit(at91_mci_exit);
1022
1023MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
1024MODULE_AUTHOR("Nick Randell");
1025MODULE_LICENSE("GPL");