/*
 * linux/drivers/mmc/host/at91_mci.c - ATMEL AT91 MCI Driver
 *
 * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
 *
 * Copyright (C) 2006 Malcolm Noyes
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
   This is the AT91 MCI driver that has been tested with both MMC cards
   and SD-cards.  Boards that support write protect are now supported.
   The CCAT91SBC001 board does not support SD cards.

   The three entry points are at91_mci_request, at91_mci_set_ios
   and at91_mci_get_ro.

   SET IOS
     This configures the device to put it into the correct mode and clock speed
     required.

   MCI REQUEST
     MCI request processes the commands sent in the mmc_request structure. This
     can consist of a processing command and a stop command in the case of
     multiple block transfers.

     There are three main types of request: commands, reads and writes.

     Commands are straightforward. The command is submitted to the controller and
     the request function returns. When the controller generates an interrupt to indicate
     the command is finished, the response to the command is read and the mmc_request_done
     function is called to end the request.

     Reads and writes work in a similar manner to normal commands but involve the PDC (DMA)
     controller to manage the transfers.

     A read is done from the controller directly to the scatterlist passed in from the request.
     Due to a bug in the AT91RM9200 controller, when a read is completed, all the words are byte
     swapped in the scatterlist buffers.  AT91SAM926x are not affected by this bug.

     The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY

     A write is slightly different in that the bytes to write are read from the scatterlist
     into a dma memory buffer (this is in case the source buffer should be read only). The
     entire write is then done from this single dma memory buffer.

     The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY

   GET RO
     Gets the status of the write protect pin, if available.
*/
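
/*
 * Illustrative sketch (not part of the original driver): the board-specific
 * setup code normally describes the MCI slot through the at91_mmc_data
 * platform data consumed below (det_pin, wire4, wp_pin, vcc_pin, slot_b).
 * The helper name and the pin numbers here are assumptions made only for
 * this example:
 *
 *	static struct at91_mmc_data __initdata ek_mmc_data = {
 *		.slot_b		= 0,
 *		.wire4		= 1,
 *		.det_pin	= AT91_PIN_PC9,
 *		.wp_pin		= AT91_PIN_PC4,
 *	};
 *
 *	at91_add_device_mmc(0, &ek_mmc_data);
 *
 * Only the at91_mmc_data fields are actually used by this driver.
 */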

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/atmel_pdc.h>

#include <linux/mmc/host.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/gpio.h>

#include <asm/mach/mmc.h>
#include <asm/arch/board.h>
#include <asm/arch/cpu.h>
#include <asm/arch/at91_mci.h>

#define DRIVER_NAME "at91_mci"

#define FL_SENT_COMMAND	(1 << 0)
#define FL_SENT_STOP	(1 << 1)

#define AT91_MCI_ERRORS	(AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE \
		| AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE \
		| AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)

#define at91_mci_read(host, reg)	__raw_readl((host)->baseaddr + (reg))
#define at91_mci_write(host, reg, val)	__raw_writel((val), (host)->baseaddr + (reg))


/*
 * Low level type for this driver
 */
struct at91mci_host
{
	struct mmc_host *mmc;
	struct mmc_command *cmd;
	struct mmc_request *request;

	void __iomem *baseaddr;
	int irq;

	struct at91_mmc_data *board;
	int present;

	struct clk *mci_clk;

	/*
	 * Flag indicating when the command has been sent. This is used to
	 * work out whether or not to send the stop
	 */
	unsigned int flags;
	/* flag for current bus settings */
	u32 bus_mode;

	/* DMA buffer used for transmitting */
	unsigned int* buffer;
	dma_addr_t physical_address;
	unsigned int total_length;

	/* Latest in the scatterlist that has been enabled for transfer, but not freed */
	int in_use_index;

	/* Latest in the scatterlist that has been enabled for transfer */
	int transfer_index;
};

/*
 * Copy from sg to a dma block - used for transfers
 */
static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
{
	unsigned int len, i, size;
	unsigned *dmabuf = host->buffer;

	size = host->total_length;
	len = data->sg_len;

	/*
	 * Just loop through all entries. Size might not
	 * be the entire list though so make sure that
	 * we do not transfer too much.
	 */
	for (i = 0; i < len; i++) {
		struct scatterlist *sg;
		int amount;
		unsigned int *sgbuffer;

		sg = &data->sg[i];

		sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
		amount = min(size, sg->length);
		size -= amount;

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			int index;

			for (index = 0; index < (amount / 4); index++)
				*dmabuf++ = swab32(sgbuffer[index]);
		}
		else
			memcpy(dmabuf, sgbuffer, amount);

		kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);

		if (size == 0)
			break;
	}

	/*
	 * Check that we didn't get a request to transfer
	 * more data than can fit into the SG list.
	 */
	BUG_ON(size != 0);
}

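/*
 * Note (comment added for clarity): the PDC exposes two receive descriptors,
 * the current one (ATMEL_PDC_RPR/ATMEL_PDC_RCR) and the next one
 * (ATMEL_PDC_RNPR/ATMEL_PDC_RNCR), so the loop below tries to keep up to two
 * scatterlist entries queued on the controller at any time.
 */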
/*
 * Prepare a dma read
 */
static void at91_mci_pre_dma_read(struct at91mci_host *host)
{
	int i;
	struct scatterlist *sg;
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("pre dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	for (i = 0; i < 2; i++) {
		/* nothing left to transfer */
		if (host->transfer_index >= data->sg_len) {
			pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
			break;
		}

		/* Check to see if this needs filling */
		if (i == 0) {
			if (at91_mci_read(host, ATMEL_PDC_RCR) != 0) {
				pr_debug("Transfer active in current\n");
				continue;
			}
		}
		else {
			if (at91_mci_read(host, ATMEL_PDC_RNCR) != 0) {
				pr_debug("Transfer active in next\n");
				continue;
			}
		}

		/* Setup the next transfer */
		pr_debug("Using transfer index %d\n", host->transfer_index);

		sg = &data->sg[host->transfer_index++];
		pr_debug("sg = %p\n", sg);

		sg->dma_address = dma_map_page(NULL, sg_page(sg), sg->offset, sg->length, DMA_FROM_DEVICE);

		pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);

		if (i == 0) {
			at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address);
			at91_mci_write(host, ATMEL_PDC_RCR, sg->length / 4);
		}
		else {
			at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address);
			at91_mci_write(host, ATMEL_PDC_RNCR, sg->length / 4);
		}
	}

	pr_debug("pre dma read done\n");
}

/*
 * Handle after a dma read
 */
static void at91_mci_post_dma_read(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("post dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	while (host->in_use_index < host->transfer_index) {
		struct scatterlist *sg;

		pr_debug("finishing index %d\n", host->in_use_index);

		sg = &data->sg[host->in_use_index++];

		pr_debug("Unmapping page %08X\n", sg->dma_address);

		dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);

		data->bytes_xfered += sg->length;

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			unsigned int *buffer;
			int index;

			/* Swap the contents of the buffer */
			buffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
			pr_debug("buffer = %p, length = %d\n", buffer, sg->length);

			for (index = 0; index < (sg->length / 4); index++)
				buffer[index] = swab32(buffer[index]);

			kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
		}

		flush_dcache_page(sg_page(sg));
	}

	/* Is there another transfer to trigger? */
	if (host->transfer_index < data->sg_len)
		at91_mci_pre_dma_read(host);
	else {
		at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_ENDRX);
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
	}

	pr_debug("post dma read done\n");
}

/*
 * Handle transmitted data
 */
static void at91_mci_handle_transmitted(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("Handling the transmit\n");

	/* Disable the transfer */
	at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);

	/* Now wait for cmd ready */
	at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE);

	cmd = host->cmd;
	if (!cmd) return;

	data = cmd->data;
	if (!data) return;

	if (cmd->data->blocks > 1) {
		pr_debug("multiple write : wait for BLKE...\n");
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
	} else
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);

	data->bytes_xfered = host->total_length;
}

/* Handle after command sent ready */
static int at91_mci_handle_cmdrdy(struct at91mci_host *host)
{
	if (!host->cmd)
		return 1;
	else if (!host->cmd->data) {
		if (host->flags & FL_SENT_STOP) {
			/* After a multi block write, we must wait for NOTBUSY */
			at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
		} else return 1;
	} else if (host->cmd->data->flags & MMC_DATA_WRITE) {
		/* After sending the multi-block-write command, start the DMA transfer */
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE);
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
	}

	/* command not completed, have to wait */
	return 0;
}


/*
 * Enable the controller
 */
static void at91_mci_enable(struct at91mci_host *host)
{
	unsigned int mr;

	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
	at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
	mr = AT91_MCI_PDCMODE | 0x34a;

	if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
		mr |= AT91_MCI_RDPROOF | AT91_MCI_WRPROOF;

	at91_mci_write(host, AT91_MCI_MR, mr);

	/* use Slot A or B (only one at same time) */
	at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b);
}

/*
 * Disable the controller
 */
static void at91_mci_disable(struct at91mci_host *host)
{
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
}

/*
 * Send a command
 */
static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
{
	unsigned int cmdr, mr;
	unsigned int block_length;
	struct mmc_data *data = cmd->data;

	unsigned int blocks;
	unsigned int ier = 0;

	host->cmd = cmd;

	/* Needed for leaving busy state before CMD1 */
	if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
		pr_debug("Clearing timeout\n");
		at91_mci_write(host, AT91_MCI_ARGR, 0);
		at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
		while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
			/* spin */
			pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
		}
	}

	cmdr = cmd->opcode;

	if (mmc_resp_type(cmd) == MMC_RSP_NONE)
		cmdr |= AT91_MCI_RSPTYP_NONE;
	else {
		/* if a response is expected then allow maximum response latency */
		cmdr |= AT91_MCI_MAXLAT;
		/* set 136 bit response for R2, 48 bit response otherwise */
		if (mmc_resp_type(cmd) == MMC_RSP_R2)
			cmdr |= AT91_MCI_RSPTYP_136;
		else
			cmdr |= AT91_MCI_RSPTYP_48;
	}

	if (data) {

		if ( data->blksz & 0x3 ) {
			pr_debug("Unsupported block size\n");
			cmd->error = -EINVAL;
			mmc_request_done(host->mmc, host->request);
			return;
		}

		block_length = data->blksz;
		blocks = data->blocks;

		/* always set data start - also set direction flag for read */
		if (data->flags & MMC_DATA_READ)
			cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
		else if (data->flags & MMC_DATA_WRITE)
			cmdr |= AT91_MCI_TRCMD_START;

		if (data->flags & MMC_DATA_STREAM)
			cmdr |= AT91_MCI_TRTYP_STREAM;
		if (data->blocks > 1)
			cmdr |= AT91_MCI_TRTYP_MULTIPLE;
	}
	else {
		block_length = 0;
		blocks = 0;
	}

	if (host->flags & FL_SENT_STOP)
		cmdr |= AT91_MCI_TRCMD_STOP;

	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdr |= AT91_MCI_OPDCMD;

	/*
	 * Set the arguments and send the command
	 */
	pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n",
		cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));

	if (!data) {
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS | ATMEL_PDC_RXTDIS);
		at91_mci_write(host, ATMEL_PDC_RPR, 0);
		at91_mci_write(host, ATMEL_PDC_RCR, 0);
		at91_mci_write(host, ATMEL_PDC_RNPR, 0);
		at91_mci_write(host, ATMEL_PDC_RNCR, 0);
		at91_mci_write(host, ATMEL_PDC_TPR, 0);
		at91_mci_write(host, ATMEL_PDC_TCR, 0);
		at91_mci_write(host, ATMEL_PDC_TNPR, 0);
		at91_mci_write(host, ATMEL_PDC_TNCR, 0);
		ier = AT91_MCI_CMDRDY;
	} else {
		/* zero block length and PDC mode */
		mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff;
		at91_mci_write(host, AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE);

		/*
		 * Disable the PDC controller
		 */
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);

		if (cmdr & AT91_MCI_TRCMD_START) {
			data->bytes_xfered = 0;
			host->transfer_index = 0;
			host->in_use_index = 0;
			if (cmdr & AT91_MCI_TRDIR) {
				/*
				 * Handle a read
				 */
				host->buffer = NULL;
				host->total_length = 0;

				at91_mci_pre_dma_read(host);
				ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
			}
			else {
				/*
				 * Handle a write
				 */
				host->total_length = block_length * blocks;
				host->buffer = dma_alloc_coherent(NULL,
						host->total_length,
						&host->physical_address, GFP_KERNEL);

				at91_mci_sg_to_dma(host, data);

				pr_debug("Transmitting %d bytes\n", host->total_length);

				at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
				at91_mci_write(host, ATMEL_PDC_TCR, host->total_length / 4);
				ier = AT91_MCI_CMDRDY;
			}
		}
	}

	/*
	 * Send the command and then enable the PDC - not the other way round as
	 * the data sheet says
	 */

	at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
	at91_mci_write(host, AT91_MCI_CMDR, cmdr);

	if (cmdr & AT91_MCI_TRCMD_START) {
		if (cmdr & AT91_MCI_TRDIR)
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
	}

	/* Enable selected interrupts */
	at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier);
}

/*
 * Process the next step in the request
 */
static void at91_mci_process_next(struct at91mci_host *host)
{
	if (!(host->flags & FL_SENT_COMMAND)) {
		host->flags |= FL_SENT_COMMAND;
		at91_mci_send_command(host, host->request->cmd);
	}
	else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
		host->flags |= FL_SENT_STOP;
		at91_mci_send_command(host, host->request->stop);
	}
	else
		mmc_request_done(host->mmc, host->request);
}

/*
 * Handle a command that has been completed
 */
static void at91_mci_completed_command(struct at91mci_host *host)
{
	struct mmc_command *cmd = host->cmd;
	unsigned int status;

	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);

	cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
	cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
	cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
	cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));

	if (host->buffer) {
		dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address);
		host->buffer = NULL;
	}

	status = at91_mci_read(host, AT91_MCI_SR);

	pr_debug("Status = %08X [%08X %08X %08X %08X]\n",
		 status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);

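	/*
	 * Note (comment added for clarity): a response CRC error is ignored
	 * below when the expected response type carries no CRC (for example
	 * R3), since the controller reports RCRCE for such responses even
	 * though they are valid.
	 */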
	if (status & AT91_MCI_ERRORS) {
		if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) {
			cmd->error = 0;
		}
		else {
			if (status & (AT91_MCI_RTOE | AT91_MCI_DTOE))
				cmd->error = -ETIMEDOUT;
			else if (status & (AT91_MCI_RCRCE | AT91_MCI_DCRCE))
				cmd->error = -EILSEQ;
			else
				cmd->error = -EIO;

			pr_debug("Error detected and set to %d (cmd = %d, retries = %d)\n",
				 cmd->error, cmd->opcode, cmd->retries);
		}
	}
	else
		cmd->error = 0;

	at91_mci_process_next(host);
}

/*
 * Handle an MMC request
 */
static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct at91mci_host *host = mmc_priv(mmc);
	host->request = mrq;
	host->flags = 0;

	at91_mci_process_next(host);
}

/*
 * Set the IOS
 */
static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int clkdiv;
	struct at91mci_host *host = mmc_priv(mmc);
	unsigned long at91_master_clock = clk_get_rate(host->mci_clk);

	host->bus_mode = ios->bus_mode;

	if (ios->clock == 0) {
		/* Disable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS);
		clkdiv = 0;
	}
	else {
		/* Enable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);

		if ((at91_master_clock % (ios->clock * 2)) == 0)
			clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
		else
			clkdiv = (at91_master_clock / ios->clock) / 2;

		pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
			at91_master_clock / (2 * (clkdiv + 1)));
	}
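	/*
	 * Note (comment added for clarity): the resulting MCI clock is
	 * MCK / (2 * (CLKDIV + 1)), with CLKDIV programmed into the mode
	 * register below.  As a purely illustrative example, a 100 MHz master
	 * clock and a 25 MHz request give CLKDIV = 1, i.e. exactly 25 MHz.
	 */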
	if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
		pr_debug("MMC: Setting controller bus width to 4\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
	}
	else {
		pr_debug("MMC: Setting controller bus width to 1\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
	}

	/* Set the clock divider */
	at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);

	/* maybe switch power to the card */
	if (host->board->vcc_pin) {
		switch (ios->power_mode) {
			case MMC_POWER_OFF:
				gpio_set_value(host->board->vcc_pin, 0);
				break;
			case MMC_POWER_UP:
			case MMC_POWER_ON:
				gpio_set_value(host->board->vcc_pin, 1);
				break;
		}
	}
}

/*
 * Handle an interrupt
 */
static irqreturn_t at91_mci_irq(int irq, void *devid)
{
	struct at91mci_host *host = devid;
	int completed = 0;
	unsigned int int_status, int_mask;

	int_status = at91_mci_read(host, AT91_MCI_SR);
	int_mask = at91_mci_read(host, AT91_MCI_IMR);

	pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask,
		int_status & int_mask);

	int_status = int_status & int_mask;

	if (int_status & AT91_MCI_ERRORS) {
		completed = 1;

		if (int_status & AT91_MCI_UNRE)
			pr_debug("MMC: Underrun error\n");
		if (int_status & AT91_MCI_OVRE)
			pr_debug("MMC: Overrun error\n");
		if (int_status & AT91_MCI_DTOE)
			pr_debug("MMC: Data timeout\n");
		if (int_status & AT91_MCI_DCRCE)
			pr_debug("MMC: CRC error in data\n");
		if (int_status & AT91_MCI_RTOE)
			pr_debug("MMC: Response timeout\n");
		if (int_status & AT91_MCI_RENDE)
			pr_debug("MMC: Response end bit error\n");
		if (int_status & AT91_MCI_RCRCE)
			pr_debug("MMC: Response CRC error\n");
		if (int_status & AT91_MCI_RDIRE)
			pr_debug("MMC: Response direction error\n");
		if (int_status & AT91_MCI_RINDE)
			pr_debug("MMC: Response index error\n");
	} else {
		/* Only continue processing if no errors */

		if (int_status & AT91_MCI_TXBUFE) {
			pr_debug("TX buffer empty\n");
			at91_mci_handle_transmitted(host);
		}

		if (int_status & AT91_MCI_ENDRX) {
			pr_debug("ENDRX\n");
			at91_mci_post_dma_read(host);
		}

		if (int_status & AT91_MCI_RXBUFF) {
			pr_debug("RX buffer full\n");
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
			at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_RXBUFF | AT91_MCI_ENDRX);
			completed = 1;
		}

		if (int_status & AT91_MCI_ENDTX)
			pr_debug("Transmit has ended\n");

		if (int_status & AT91_MCI_NOTBUSY) {
			pr_debug("Card is ready\n");
			completed = 1;
		}

		if (int_status & AT91_MCI_DTIP)
			pr_debug("Data transfer in progress\n");

		if (int_status & AT91_MCI_BLKE) {
			pr_debug("Block transfer has ended\n");
			completed = 1;
		}

		if (int_status & AT91_MCI_TXRDY)
			pr_debug("Ready to transmit\n");

		if (int_status & AT91_MCI_RXRDY)
			pr_debug("Ready to receive\n");

		if (int_status & AT91_MCI_CMDRDY) {
			pr_debug("Command ready\n");
			completed = at91_mci_handle_cmdrdy(host);
		}
	}

	if (completed) {
		pr_debug("Completed command\n");
		at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
		at91_mci_completed_command(host);
	} else
		at91_mci_write(host, AT91_MCI_IDR, int_status);

	return IRQ_HANDLED;
}

static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
{
	struct at91mci_host *host = _host;
	int present = !gpio_get_value(irq_to_gpio(irq));

	/*
	 * we expect this irq on both insert and remove,
	 * and use a short delay to debounce.
	 */
	if (present != host->present) {
		host->present = present;
		pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
			present ? "insert" : "remove");
		if (!present) {
			pr_debug("****** Resetting SD-card bus width ******\n");
			at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
		}
		mmc_detect_change(host->mmc, msecs_to_jiffies(100));
	}
	return IRQ_HANDLED;
}

static int at91_mci_get_ro(struct mmc_host *mmc)
{
	int read_only = 0;
	struct at91mci_host *host = mmc_priv(mmc);

	if (host->board->wp_pin) {
		read_only = gpio_get_value(host->board->wp_pin);
		printk(KERN_WARNING "%s: card is %s\n", mmc_hostname(mmc),
				(read_only ? "read-only" : "read-write") );
	}
	else {
		printk(KERN_WARNING "%s: host does not support reading read-only "
				"switch. Assuming write-enable.\n", mmc_hostname(mmc));
	}
	return read_only;
}

static const struct mmc_host_ops at91_mci_ops = {
	.request	= at91_mci_request,
	.set_ios	= at91_mci_set_ios,
	.get_ro		= at91_mci_get_ro,
};

/*
 * Probe for the device
 */
static int __init at91_mci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct at91mci_host *host;
	struct resource *res;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		dev_dbg(&pdev->dev, "couldn't allocate mmc host\n");
		goto fail6;
	}

	mmc->ops = &at91_mci_ops;
	mmc->f_min = 375000;
	mmc->f_max = 25000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	mmc->max_blk_size = 4095;
	mmc->max_blk_count = mmc->max_req_size;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->buffer = NULL;
	host->bus_mode = 0;
	host->board = pdev->dev.platform_data;
	if (host->board->wire4) {
		if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
			mmc->caps |= MMC_CAP_4_BIT_DATA;
		else
			dev_warn(&pdev->dev, "4 wire bus mode not supported"
				" - using 1 wire\n");
	}

	/*
	 * Reserve GPIOs ... board init code makes sure these pins are set
	 * up as GPIOs with the right direction (input, except for vcc)
	 */
	if (host->board->det_pin) {
		ret = gpio_request(host->board->det_pin, "mmc_detect");
		if (ret < 0) {
			dev_dbg(&pdev->dev, "couldn't claim card detect pin\n");
			goto fail5;
		}
	}
	if (host->board->wp_pin) {
		ret = gpio_request(host->board->wp_pin, "mmc_wp");
		if (ret < 0) {
			dev_dbg(&pdev->dev, "couldn't claim wp sense pin\n");
			goto fail4;
		}
	}
	if (host->board->vcc_pin) {
		ret = gpio_request(host->board->vcc_pin, "mmc_vcc");
		if (ret < 0) {
			dev_dbg(&pdev->dev, "couldn't claim vcc switch pin\n");
			goto fail3;
		}
	}

	/*
	 * Get Clock
	 */
	host->mci_clk = clk_get(&pdev->dev, "mci_clk");
	if (IS_ERR(host->mci_clk)) {
		ret = -ENODEV;
		dev_dbg(&pdev->dev, "no mci_clk?\n");
		goto fail2;
	}

	/*
	 * Map I/O region
	 */
	host->baseaddr = ioremap(res->start, res->end - res->start + 1);
	if (!host->baseaddr) {
		ret = -ENOMEM;
		goto fail1;
	}

	/*
	 * Reset hardware
	 */
	clk_enable(host->mci_clk);	/* Enable the peripheral clock */
	at91_mci_disable(host);
	at91_mci_enable(host);

	/*
	 * Allocate the MCI interrupt
	 */
	host->irq = platform_get_irq(pdev, 0);
	ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED,
			mmc_hostname(mmc), host);
	if (ret) {
		dev_dbg(&pdev->dev, "request MCI interrupt failed\n");
		goto fail0;
	}

	platform_set_drvdata(pdev, mmc);

	/*
	 * Add host to MMC layer
	 */
	if (host->board->det_pin) {
		host->present = !gpio_get_value(host->board->det_pin);
	}
	else
		host->present = -1;

	mmc_add_host(mmc);

	/*
	 * monitor card insertion/removal if we can
	 */
	if (host->board->det_pin) {
		ret = request_irq(gpio_to_irq(host->board->det_pin),
				at91_mmc_det_irq, 0, mmc_hostname(mmc), host);
		if (ret)
			dev_warn(&pdev->dev, "request MMC detect irq failed\n");
		else
			device_init_wakeup(&pdev->dev, 1);
	}

	pr_debug("Added MCI driver\n");

	return 0;

fail0:
	clk_disable(host->mci_clk);
	iounmap(host->baseaddr);
fail1:
	clk_put(host->mci_clk);
fail2:
	if (host->board->vcc_pin)
		gpio_free(host->board->vcc_pin);
fail3:
	if (host->board->wp_pin)
		gpio_free(host->board->wp_pin);
fail4:
	if (host->board->det_pin)
		gpio_free(host->board->det_pin);
fail5:
	mmc_free_host(mmc);
fail6:
	release_mem_region(res->start, res->end - res->start + 1);
	dev_err(&pdev->dev, "probe failed, err %d\n", ret);
	return ret;
}

/*
 * Remove a device
 */
static int __exit at91_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host;
	struct resource *res;

	if (!mmc)
		return -1;

	host = mmc_priv(mmc);

	if (host->board->det_pin) {
		if (device_can_wakeup(&pdev->dev))
			free_irq(gpio_to_irq(host->board->det_pin), host);
		device_init_wakeup(&pdev->dev, 0);
		gpio_free(host->board->det_pin);
	}

	at91_mci_disable(host);
	mmc_remove_host(mmc);
	free_irq(host->irq, host);

	clk_disable(host->mci_clk);	/* Disable the peripheral clock */
	clk_put(host->mci_clk);

	if (host->board->vcc_pin)
		gpio_free(host->board->vcc_pin);
	if (host->board->wp_pin)
		gpio_free(host->board->wp_pin);

	iounmap(host->baseaddr);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, res->end - res->start + 1);

	mmc_free_host(mmc);
	platform_set_drvdata(pdev, NULL);
	pr_debug("MCI Removed\n");

	return 0;
}

#ifdef CONFIG_PM
static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host = mmc_priv(mmc);
	int ret = 0;

	if (host->board->det_pin && device_may_wakeup(&pdev->dev))
		enable_irq_wake(host->board->det_pin);

	if (mmc)
		ret = mmc_suspend_host(mmc, state);

	return ret;
}

static int at91_mci_resume(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host = mmc_priv(mmc);
	int ret = 0;

	if (host->board->det_pin && device_may_wakeup(&pdev->dev))
		disable_irq_wake(host->board->det_pin);

	if (mmc)
		ret = mmc_resume_host(mmc);

	return ret;
}
#else
#define at91_mci_suspend	NULL
#define at91_mci_resume		NULL
#endif

static struct platform_driver at91_mci_driver = {
	.remove		= __exit_p(at91_mci_remove),
	.suspend	= at91_mci_suspend,
	.resume		= at91_mci_resume,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init at91_mci_init(void)
{
	return platform_driver_probe(&at91_mci_driver, at91_mci_probe);
}

static void __exit at91_mci_exit(void)
{
	platform_driver_unregister(&at91_mci_driver);
}

module_init(at91_mci_init);
module_exit(at91_mci_exit);

MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
MODULE_AUTHOR("Nick Randell");
MODULE_LICENSE("GPL");