/*
 *  linux/drivers/mmc/host/at91_mci.c - ATMEL AT91 MCI Driver
 *
 *  Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
 *
 *  Copyright (C) 2006 Malcolm Noyes
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
   This is the AT91 MCI driver that has been tested with both MMC cards
   and SD-cards.  Boards that support write protect are now supported.
   The CCAT91SBC001 board does not support SD cards.

   The three entry points are at91_mci_request, at91_mci_set_ios
   and at91_mci_get_ro.

   SET IOS
     This configures the device to put it into the correct mode and clock speed
     required.

   MCI REQUEST
     MCI request processes the commands sent in the mmc_request structure. This
     can consist of a processing command and a stop command in the case of
     multiple block transfers.

     There are three main types of request: commands, reads and writes.

     Commands are straightforward. The command is submitted to the controller and
     the request function returns. When the controller generates an interrupt to indicate
     the command is finished, the response to the command is read and the mmc_request_done
     function is called to end the request.

     Reads and writes work in a similar manner to normal commands but involve the PDC (DMA)
     controller to manage the transfers.

     A read is done from the controller directly to the scatterlist passed in from the request.
     Due to a bug in the AT91RM9200 controller, when a read is completed, all the words are byte
     swapped in the scatterlist buffers.  AT91SAM926x parts are not affected by this bug.

     The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY

     A write is slightly different in that the bytes to write are read from the scatterlist
     into a dma memory buffer (this is in case the source buffer should be read only). The
     entire write is then done from this single dma memory buffer.

     The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY

   GET RO
     Gets the status of the write protect pin, if available.
*/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/atmel_pdc.h>

#include <linux/mmc/host.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/gpio.h>

#include <asm/mach/mmc.h>
#include <asm/arch/board.h>
#include <asm/arch/cpu.h>
#include <asm/arch/at91_mci.h>

#define DRIVER_NAME "at91_mci"

#define FL_SENT_COMMAND	(1 << 0)
#define FL_SENT_STOP	(1 << 1)

#define AT91_MCI_ERRORS	(AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE	\
		| AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE		\
		| AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)

#define at91_mci_read(host, reg)	__raw_readl((host)->baseaddr + (reg))
#define at91_mci_write(host, reg, val)	__raw_writel((val), (host)->baseaddr + (reg))


/*
 * Low level type for this driver
 */
struct at91mci_host
{
	struct mmc_host *mmc;
	struct mmc_command *cmd;
	struct mmc_request *request;

	void __iomem *baseaddr;
	int irq;

	struct at91_mmc_data *board;
	int present;

	struct clk *mci_clk;

	/*
	 * Flag indicating when the command has been sent. This is used to
	 * work out whether or not to send the stop
	 */
	unsigned int flags;
	/* flag for current bus settings */
	u32 bus_mode;

	/* DMA buffer used for transmitting */
	unsigned int *buffer;
	dma_addr_t physical_address;
	unsigned int total_length;

	/* Latest in the scatterlist that has been enabled for transfer, but not freed */
	int in_use_index;

	/* Latest in the scatterlist that has been enabled for transfer */
	int transfer_index;
};

/*
 * Copy from sg to a dma block - used for transfers
 */
static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
{
	unsigned int len, i, size;
	unsigned *dmabuf = host->buffer;

	size = host->total_length;
	len = data->sg_len;

	/*
	 * Just loop through all entries. Size might not
	 * be the entire list though so make sure that
	 * we do not transfer too much.
	 */
	for (i = 0; i < len; i++) {
		struct scatterlist *sg;
		int amount;
		unsigned int *sgbuffer;

		sg = &data->sg[i];

		sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
		amount = min(size, sg->length);
		size -= amount;

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			int index;

			for (index = 0; index < (amount / 4); index++)
				*dmabuf++ = swab32(sgbuffer[index]);
		} else {
			char *tmpv = (char *)dmabuf;

			memcpy(tmpv, sgbuffer, amount);
			/* advance the destination so consecutive scatterlist
			 * entries land back-to-back in the bounce buffer */
			tmpv += amount;
			dmabuf = (unsigned *)tmpv;
		}

		kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);

		if (size == 0)
			break;
	}

	/*
	 * Check that we didn't get a request to transfer
	 * more data than can fit into the SG list.
	 */
	BUG_ON(size != 0);
}

/*
 * Prepare a dma read
 */
static void at91_mci_pre_dma_read(struct at91mci_host *host)
{
	int i;
	struct scatterlist *sg;
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("pre dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

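	/*
	 * The PDC provides a "current" and a "next" receive buffer; the two
	 * passes below program whichever of RPR/RCR (current) and RNPR/RNCR
	 * (next) is free, so up to two scatterlist entries are queued at once.
	 */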
	for (i = 0; i < 2; i++) {
		/* nothing left to transfer */
		if (host->transfer_index >= data->sg_len) {
			pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
			break;
		}

		/* Check to see if this needs filling */
		if (i == 0) {
			if (at91_mci_read(host, ATMEL_PDC_RCR) != 0) {
				pr_debug("Transfer active in current\n");
				continue;
			}
		}
		else {
			if (at91_mci_read(host, ATMEL_PDC_RNCR) != 0) {
				pr_debug("Transfer active in next\n");
				continue;
			}
		}

		/* Setup the next transfer */
		pr_debug("Using transfer index %d\n", host->transfer_index);

		sg = &data->sg[host->transfer_index++];
		pr_debug("sg = %p\n", sg);

		sg->dma_address = dma_map_page(NULL, sg_page(sg), sg->offset, sg->length, DMA_FROM_DEVICE);

		pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);

		if (i == 0) {
			at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address);
			at91_mci_write(host, ATMEL_PDC_RCR, sg->length / 4);
		}
		else {
			at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address);
			at91_mci_write(host, ATMEL_PDC_RNCR, sg->length / 4);
		}
	}

	pr_debug("pre dma read done\n");
}

/*
 * Handle after a dma read
 */
static void at91_mci_post_dma_read(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("post dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	while (host->in_use_index < host->transfer_index) {
		struct scatterlist *sg;

		pr_debug("finishing index %d\n", host->in_use_index);

		sg = &data->sg[host->in_use_index++];

		pr_debug("Unmapping page %08X\n", sg->dma_address);

		dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);

		data->bytes_xfered += sg->length;

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			unsigned int *buffer;
			int index;

			/* Swap the contents of the buffer */
			buffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
			pr_debug("buffer = %p, length = %d\n", buffer, sg->length);

			for (index = 0; index < (sg->length / 4); index++)
				buffer[index] = swab32(buffer[index]);

			kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
		}

		flush_dcache_page(sg_page(sg));
	}

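	/*
	 * ENDRX fires each time the current PDC buffer completes.  Once the
	 * whole scatterlist has been queued, switch to waiting for RXBUFF,
	 * which signals that the final buffer has been filled as well.
	 */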
	/* Is there another transfer to trigger? */
	if (host->transfer_index < data->sg_len)
		at91_mci_pre_dma_read(host);
	else {
		at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_ENDRX);
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
	}

	pr_debug("post dma read done\n");
}

/*
 * Handle transmitted data
 */
static void at91_mci_handle_transmitted(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("Handling the transmit\n");

	/* Disable the transfer */
	at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);

	/* Now wait for cmd ready */
	at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE);

	cmd = host->cmd;
	if (!cmd) return;

	data = cmd->data;
	if (!data) return;

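	/*
	 * A multi-block write is finished on BLKE (last block transferred);
	 * a single-block write is finished once the card releases busy
	 * (NOTBUSY).
	 */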
	if (cmd->data->blocks > 1) {
		pr_debug("multiple write : wait for BLKE...\n");
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
	} else
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);

	data->bytes_xfered = host->total_length;
}

/* Handle after command sent ready */
static int at91_mci_handle_cmdrdy(struct at91mci_host *host)
{
	if (!host->cmd)
		return 1;
	else if (!host->cmd->data) {
		if (host->flags & FL_SENT_STOP) {
			/* After a multi-block write, we must wait for NOTBUSY */
			at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
		} else
			return 1;
	} else if (host->cmd->data->flags & MMC_DATA_WRITE) {
		/* After sending a multi-block-write command, start the DMA transfer */
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE);
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
	}

	/* command not completed, have to wait */
	return 0;
}

/*
 * Enable the controller
 */
static void at91_mci_enable(struct at91mci_host *host)
{
	unsigned int mr;

	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
	at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
	mr = AT91_MCI_PDCMODE | 0x34a;

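	/*
	 * RDPROOF/WRPROOF let the controller stall the MCI clock whenever the
	 * internal FIFO cannot keep up, avoiding overrun/underrun errors; only
	 * the SAM926x parts implement these bits.
	 */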
	if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
		mr |= AT91_MCI_RDPROOF | AT91_MCI_WRPROOF;

	at91_mci_write(host, AT91_MCI_MR, mr);

	/* use Slot A or B (only one at same time) */
	at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b);
}

/*
 * Disable the controller
 */
static void at91_mci_disable(struct at91mci_host *host)
{
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
}

/*
 * Send a command
 */
static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
{
	unsigned int cmdr, mr;
	unsigned int block_length;
	struct mmc_data *data = cmd->data;

	unsigned int blocks;
	unsigned int ier = 0;

	host->cmd = cmd;

	/* Needed for leaving busy state before CMD1 */
	if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
		pr_debug("Clearing timeout\n");
		at91_mci_write(host, AT91_MCI_ARGR, 0);
		at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
		while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
			/* spin */
			pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
		}
	}

	cmdr = cmd->opcode;

	if (mmc_resp_type(cmd) == MMC_RSP_NONE)
		cmdr |= AT91_MCI_RSPTYP_NONE;
	else {
		/* if a response is expected then allow maximum response latency */
		cmdr |= AT91_MCI_MAXLAT;
		/* set 136 bit response for R2, 48 bit response otherwise */
		if (mmc_resp_type(cmd) == MMC_RSP_R2)
			cmdr |= AT91_MCI_RSPTYP_136;
		else
			cmdr |= AT91_MCI_RSPTYP_48;
	}

	if (data) {

		if (data->blksz & 0x3) {
			pr_debug("Unsupported block size\n");
			cmd->error = -EINVAL;
			mmc_request_done(host->mmc, host->request);
			return;
		}

		block_length = data->blksz;
		blocks = data->blocks;

		/* always set data start - also set direction flag for read */
		if (data->flags & MMC_DATA_READ)
			cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
		else if (data->flags & MMC_DATA_WRITE)
			cmdr |= AT91_MCI_TRCMD_START;

		if (data->flags & MMC_DATA_STREAM)
			cmdr |= AT91_MCI_TRTYP_STREAM;
		if (data->blocks > 1)
			cmdr |= AT91_MCI_TRTYP_MULTIPLE;
	}
	else {
		block_length = 0;
		blocks = 0;
	}

	if (host->flags & FL_SENT_STOP)
		cmdr |= AT91_MCI_TRCMD_STOP;

	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdr |= AT91_MCI_OPDCMD;

	/*
	 * Set the arguments and send the command
	 */
	pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n",
		cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));

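	/*
	 * For a command with no data phase, disable the PDC and clear its
	 * pointer/counter registers so no stale transfer can be restarted.
	 */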
	if (!data) {
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS | ATMEL_PDC_RXTDIS);
		at91_mci_write(host, ATMEL_PDC_RPR, 0);
		at91_mci_write(host, ATMEL_PDC_RCR, 0);
		at91_mci_write(host, ATMEL_PDC_RNPR, 0);
		at91_mci_write(host, ATMEL_PDC_RNCR, 0);
		at91_mci_write(host, ATMEL_PDC_TPR, 0);
		at91_mci_write(host, ATMEL_PDC_TCR, 0);
		at91_mci_write(host, ATMEL_PDC_TNPR, 0);
		at91_mci_write(host, ATMEL_PDC_TNCR, 0);
		ier = AT91_MCI_CMDRDY;
	} else {
		/* zero block length and PDC mode */
		mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff;
		at91_mci_write(host, AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE);

		/*
		 * Disable the PDC controller
		 */
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);

		if (cmdr & AT91_MCI_TRCMD_START) {
			data->bytes_xfered = 0;
			host->transfer_index = 0;
			host->in_use_index = 0;
			if (cmdr & AT91_MCI_TRDIR) {
				/*
				 * Handle a read
				 */
				host->buffer = NULL;
				host->total_length = 0;

				at91_mci_pre_dma_read(host);
				ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
			}
			else {
				/*
				 * Handle a write
				 */
				host->total_length = block_length * blocks;
				host->buffer = dma_alloc_coherent(NULL,
						host->total_length,
						&host->physical_address, GFP_KERNEL);

				at91_mci_sg_to_dma(host, data);

				pr_debug("Transmitting %d bytes\n", host->total_length);

				at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
				at91_mci_write(host, ATMEL_PDC_TCR, host->total_length / 4);
				ier = AT91_MCI_CMDRDY;
			}
		}
	}

	/*
	 * Send the command and then enable the PDC - not the other way round as
	 * the data sheet says
	 */

	at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
	at91_mci_write(host, AT91_MCI_CMDR, cmdr);

	if (cmdr & AT91_MCI_TRCMD_START) {
		if (cmdr & AT91_MCI_TRDIR)
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
	}

	/* Enable selected interrupts */
	at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier);
}

/*
 * Process the next step in the request
 */
static void at91_mci_process_next(struct at91mci_host *host)
{
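	/*
	 * A request is handled as a small state machine: first the main
	 * command (FL_SENT_COMMAND), then an optional stop command
	 * (FL_SENT_STOP), and finally mmc_request_done() reports completion.
	 */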
	if (!(host->flags & FL_SENT_COMMAND)) {
		host->flags |= FL_SENT_COMMAND;
		at91_mci_send_command(host, host->request->cmd);
	}
	else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
		host->flags |= FL_SENT_STOP;
		at91_mci_send_command(host, host->request->stop);
	}
	else
		mmc_request_done(host->mmc, host->request);
}

/*
 * Handle a command that has been completed
 */
static void at91_mci_completed_command(struct at91mci_host *host)
{
	struct mmc_command *cmd = host->cmd;
	unsigned int status;

	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);

	cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
	cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
	cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
	cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));

	if (host->buffer) {
		dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address);
		host->buffer = NULL;
	}

	status = at91_mci_read(host, AT91_MCI_SR);

	pr_debug("Status = %08X [%08X %08X %08X %08X]\n",
		 status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);

	if (status & AT91_MCI_ERRORS) {
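		/*
		 * A response CRC error is expected for commands whose response
		 * carries no CRC (e.g. the R3 reply to SEND_OP_COND), so it is
		 * not reported as a failure.
		 */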
		if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) {
			cmd->error = 0;
		}
		else {
			if (status & (AT91_MCI_RTOE | AT91_MCI_DTOE))
				cmd->error = -ETIMEDOUT;
			else if (status & (AT91_MCI_RCRCE | AT91_MCI_DCRCE))
				cmd->error = -EILSEQ;
			else
				cmd->error = -EIO;

			pr_debug("Error detected and set to %d (cmd = %d, retries = %d)\n",
				 cmd->error, cmd->opcode, cmd->retries);
		}
	}
	else
		cmd->error = 0;

	at91_mci_process_next(host);
}

/*
 * Handle an MMC request
 */
static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct at91mci_host *host = mmc_priv(mmc);
	host->request = mrq;
	host->flags = 0;

	at91_mci_process_next(host);
}

/*
 * Set the IOS
 */
static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int clkdiv;
	struct at91mci_host *host = mmc_priv(mmc);
	unsigned long at91_master_clock = clk_get_rate(host->mci_clk);

	host->bus_mode = ios->bus_mode;

	if (ios->clock == 0) {
		/* Disable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS);
		clkdiv = 0;
	}
	else {
		/* Enable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);

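		/*
		 * The MCI clock runs at MCK / (2 * (CLKDIV + 1)); choose the
		 * divider so the resulting rate never exceeds the requested
		 * clock.
		 */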
		if ((at91_master_clock % (ios->clock * 2)) == 0)
			clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
		else
			clkdiv = (at91_master_clock / ios->clock) / 2;

		pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
			 at91_master_clock / (2 * (clkdiv + 1)));
	}
	if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
		pr_debug("MMC: Setting controller bus width to 4\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
	}
	else {
		pr_debug("MMC: Setting controller bus width to 1\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
	}

	/* Set the clock divider */
	at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);

	/* maybe switch power to the card */
	if (host->board->vcc_pin) {
		switch (ios->power_mode) {
		case MMC_POWER_OFF:
			gpio_set_value(host->board->vcc_pin, 0);
			break;
		case MMC_POWER_UP:
			gpio_set_value(host->board->vcc_pin, 1);
			break;
		case MMC_POWER_ON:
			break;
		default:
			WARN_ON(1);
		}
	}
}

/*
 * Handle an interrupt
 */
static irqreturn_t at91_mci_irq(int irq, void *devid)
{
	struct at91mci_host *host = devid;
	int completed = 0;
	unsigned int int_status, int_mask;

	int_status = at91_mci_read(host, AT91_MCI_SR);
	int_mask = at91_mci_read(host, AT91_MCI_IMR);

	pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask,
		int_status & int_mask);

	int_status = int_status & int_mask;

	if (int_status & AT91_MCI_ERRORS) {
		completed = 1;

		if (int_status & AT91_MCI_UNRE)
			pr_debug("MMC: Underrun error\n");
		if (int_status & AT91_MCI_OVRE)
			pr_debug("MMC: Overrun error\n");
		if (int_status & AT91_MCI_DTOE)
			pr_debug("MMC: Data timeout\n");
		if (int_status & AT91_MCI_DCRCE)
			pr_debug("MMC: CRC error in data\n");
		if (int_status & AT91_MCI_RTOE)
			pr_debug("MMC: Response timeout\n");
		if (int_status & AT91_MCI_RENDE)
			pr_debug("MMC: Response end bit error\n");
		if (int_status & AT91_MCI_RCRCE)
			pr_debug("MMC: Response CRC error\n");
		if (int_status & AT91_MCI_RDIRE)
			pr_debug("MMC: Response direction error\n");
		if (int_status & AT91_MCI_RINDE)
			pr_debug("MMC: Response index error\n");
	} else {
		/* Only continue processing if no errors */

		if (int_status & AT91_MCI_TXBUFE) {
			pr_debug("TX buffer empty\n");
			at91_mci_handle_transmitted(host);
		}

		if (int_status & AT91_MCI_ENDRX) {
			pr_debug("ENDRX\n");
			at91_mci_post_dma_read(host);
		}

		if (int_status & AT91_MCI_RXBUFF) {
			pr_debug("RX buffer full\n");
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
			at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_RXBUFF | AT91_MCI_ENDRX);
			completed = 1;
		}

		if (int_status & AT91_MCI_ENDTX)
			pr_debug("Transmit has ended\n");

		if (int_status & AT91_MCI_NOTBUSY) {
			pr_debug("Card is ready\n");
			completed = 1;
		}

		if (int_status & AT91_MCI_DTIP)
			pr_debug("Data transfer in progress\n");

		if (int_status & AT91_MCI_BLKE) {
			pr_debug("Block transfer has ended\n");
			completed = 1;
		}

		if (int_status & AT91_MCI_TXRDY)
			pr_debug("Ready to transmit\n");

		if (int_status & AT91_MCI_RXRDY)
			pr_debug("Ready to receive\n");

		if (int_status & AT91_MCI_CMDRDY) {
			pr_debug("Command ready\n");
			completed = at91_mci_handle_cmdrdy(host);
		}
	}

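	/*
	 * If the request has finished, disable all MCI interrupts and complete
	 * the command; otherwise just mask the sources handled above.
	 */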
	if (completed) {
		pr_debug("Completed command\n");
		at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
		at91_mci_completed_command(host);
	} else
		at91_mci_write(host, AT91_MCI_IDR, int_status);

	return IRQ_HANDLED;
}

static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
{
	struct at91mci_host *host = _host;
	int present = !gpio_get_value(irq_to_gpio(irq));

	/*
	 * we expect this irq on both insert and remove,
	 * and use a short delay to debounce.
	 */
	if (present != host->present) {
		host->present = present;
		pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
			present ? "insert" : "remove");
		if (!present) {
			pr_debug("****** Resetting SD-card bus width ******\n");
			at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
		}
		mmc_detect_change(host->mmc, msecs_to_jiffies(100));
	}
	return IRQ_HANDLED;
}

static int at91_mci_get_ro(struct mmc_host *mmc)
{
	struct at91mci_host *host = mmc_priv(mmc);

	if (host->board->wp_pin)
		return !!gpio_get_value(host->board->wp_pin);
	/*
	 * Board doesn't support read only detection; let the mmc core
	 * decide what to do.
	 */
	return -ENOSYS;
}

static const struct mmc_host_ops at91_mci_ops = {
	.request	= at91_mci_request,
	.set_ios	= at91_mci_set_ios,
	.get_ro		= at91_mci_get_ro,
};

/*
 * Probe for the device
 */
static int __init at91_mci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct at91mci_host *host;
	struct resource *res;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		dev_dbg(&pdev->dev, "couldn't allocate mmc host\n");
		goto fail6;
	}

	mmc->ops = &at91_mci_ops;
	mmc->f_min = 375000;
	mmc->f_max = 25000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	mmc->max_blk_size = 4095;
	mmc->max_blk_count = mmc->max_req_size;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->buffer = NULL;
	host->bus_mode = 0;
	host->board = pdev->dev.platform_data;
	if (host->board->wire4) {
		if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
			mmc->caps |= MMC_CAP_4_BIT_DATA;
		else
			dev_warn(&pdev->dev, "4 wire bus mode not supported"
				" - using 1 wire\n");
	}

	/*
	 * Reserve GPIOs ... board init code makes sure these pins are set
	 * up as GPIOs with the right direction (input, except for vcc)
	 */
	if (host->board->det_pin) {
		ret = gpio_request(host->board->det_pin, "mmc_detect");
		if (ret < 0) {
			dev_dbg(&pdev->dev, "couldn't claim card detect pin\n");
			goto fail5;
		}
	}
	if (host->board->wp_pin) {
		ret = gpio_request(host->board->wp_pin, "mmc_wp");
		if (ret < 0) {
			dev_dbg(&pdev->dev, "couldn't claim wp sense pin\n");
			goto fail4;
		}
	}
	if (host->board->vcc_pin) {
		ret = gpio_request(host->board->vcc_pin, "mmc_vcc");
		if (ret < 0) {
			dev_dbg(&pdev->dev, "couldn't claim vcc switch pin\n");
			goto fail3;
		}
	}

	/*
	 * Get Clock
	 */
	host->mci_clk = clk_get(&pdev->dev, "mci_clk");
	if (IS_ERR(host->mci_clk)) {
		ret = -ENODEV;
		dev_dbg(&pdev->dev, "no mci_clk?\n");
		goto fail2;
	}

	/*
	 * Map I/O region
	 */
	host->baseaddr = ioremap(res->start, res->end - res->start + 1);
	if (!host->baseaddr) {
		ret = -ENOMEM;
		goto fail1;
	}

	/*
	 * Reset hardware
	 */
	clk_enable(host->mci_clk);		/* Enable the peripheral clock */
	at91_mci_disable(host);
	at91_mci_enable(host);

	/*
	 * Allocate the MCI interrupt
	 */
	host->irq = platform_get_irq(pdev, 0);
	ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED,
			mmc_hostname(mmc), host);
	if (ret) {
		dev_dbg(&pdev->dev, "request MCI interrupt failed\n");
		goto fail0;
	}

	platform_set_drvdata(pdev, mmc);

	/*
	 * Add host to MMC layer
	 */
	if (host->board->det_pin) {
		host->present = !gpio_get_value(host->board->det_pin);
	}
	else
		host->present = -1;

	mmc_add_host(mmc);

	/*
	 * monitor card insertion/removal if we can
	 */
	if (host->board->det_pin) {
		ret = request_irq(gpio_to_irq(host->board->det_pin),
				at91_mmc_det_irq, 0, mmc_hostname(mmc), host);
		if (ret)
			dev_warn(&pdev->dev, "request MMC detect irq failed\n");
		else
			device_init_wakeup(&pdev->dev, 1);
	}

	pr_debug("Added MCI driver\n");

	return 0;

fail0:
	clk_disable(host->mci_clk);
	iounmap(host->baseaddr);
fail1:
	clk_put(host->mci_clk);
fail2:
	if (host->board->vcc_pin)
		gpio_free(host->board->vcc_pin);
fail3:
	if (host->board->wp_pin)
		gpio_free(host->board->wp_pin);
fail4:
	if (host->board->det_pin)
		gpio_free(host->board->det_pin);
fail5:
	mmc_free_host(mmc);
fail6:
	release_mem_region(res->start, res->end - res->start + 1);
	dev_err(&pdev->dev, "probe failed, err %d\n", ret);
	return ret;
}

/*
 * Remove a device
 */
static int __exit at91_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host;
	struct resource *res;

	if (!mmc)
		return -1;

	host = mmc_priv(mmc);

	if (host->board->det_pin) {
		if (device_can_wakeup(&pdev->dev))
			free_irq(gpio_to_irq(host->board->det_pin), host);
		device_init_wakeup(&pdev->dev, 0);
		gpio_free(host->board->det_pin);
	}

	at91_mci_disable(host);
	mmc_remove_host(mmc);
	free_irq(host->irq, host);

	clk_disable(host->mci_clk);		/* Disable the peripheral clock */
	clk_put(host->mci_clk);

	if (host->board->vcc_pin)
		gpio_free(host->board->vcc_pin);
	if (host->board->wp_pin)
		gpio_free(host->board->wp_pin);

	iounmap(host->baseaddr);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, res->end - res->start + 1);

	mmc_free_host(mmc);
	platform_set_drvdata(pdev, NULL);
	pr_debug("MCI Removed\n");

	return 0;
}

#ifdef CONFIG_PM
static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host = mmc_priv(mmc);
	int ret = 0;

	if (host->board->det_pin && device_may_wakeup(&pdev->dev))
		enable_irq_wake(host->board->det_pin);

	if (mmc)
		ret = mmc_suspend_host(mmc, state);

	return ret;
}

static int at91_mci_resume(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host = mmc_priv(mmc);
	int ret = 0;

	if (host->board->det_pin && device_may_wakeup(&pdev->dev))
		disable_irq_wake(host->board->det_pin);

	if (mmc)
		ret = mmc_resume_host(mmc);

	return ret;
}
#else
#define at91_mci_suspend	NULL
#define at91_mci_resume		NULL
#endif

static struct platform_driver at91_mci_driver = {
	.remove		= __exit_p(at91_mci_remove),
	.suspend	= at91_mci_suspend,
	.resume		= at91_mci_resume,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init at91_mci_init(void)
{
	return platform_driver_probe(&at91_mci_driver, at91_mci_probe);
}

static void __exit at91_mci_exit(void)
{
	platform_driver_unregister(&at91_mci_driver);
}

module_init(at91_mci_init);
module_exit(at91_mci_exit);

MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
MODULE_AUTHOR("Nick Randell");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at91_mci");