/*
 *  linux/drivers/mmc/at91_mci.c - ATMEL AT91 MCI Driver
 *
 *  Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
 *
 *  Copyright (C) 2006 Malcolm Noyes
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
   This is the AT91 MCI driver that has been tested with both MMC cards
   and SD-cards.  Boards that support write protect are now supported.
   The CCAT91SBC001 board does not support SD cards.

   The three entry points are at91_mci_request, at91_mci_set_ios
   and at91_mci_get_ro.

   SET IOS
     This configures the device to put it into the correct mode and clock speed
     required.

   MCI REQUEST
     MCI request processes the commands sent in the mmc_request structure. This
     can consist of a processing command and a stop command in the case of
     multiple block transfers.  (An illustrative request layout is sketched in
     the comment that follows.)

     There are three main types of request: commands, reads and writes.

     Commands are straightforward. The command is submitted to the controller and
     the request function returns. When the controller generates an interrupt to indicate
     the command is finished, the response to the command is read and the mmc_request_done
     function called to end the request.

     Reads and writes work in a similar manner to normal commands but involve the PDC (DMA)
     controller to manage the transfers.

     A read is done from the controller directly to the scatterlist passed in from the request.
     Due to a bug in the AT91RM9200 controller, when a read is completed, all the words are byte
     swapped in the scatterlist buffers.  AT91SAM926x are not affected by this bug.

     The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY

     A write is slightly different in that the bytes to write are read from the scatterlist
     into a dma memory buffer (this is in case the source buffer should be read only). The
     entire write is then done from this single dma memory buffer.

     The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY

   GET RO
     Gets the status of the write protect pin, if available.
*/
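
/*
 * Illustrative example: a multiple-block read reaches at91_mci_request()
 * as an mmc_request shaped roughly like this (the same fields are used
 * throughout the driver below):
 *
 *	mrq->cmd          opcode/arg of the data command
 *	mrq->cmd->data    blksz, blocks, the sg/sg_len scatterlist and the
 *	                  MMC_DATA_READ | MMC_DATA_MULTI flags
 *	mrq->stop         stop command issued once the data command completes
 *
 * The driver sends mrq->cmd, then mrq->stop (if present), and calls
 * mmc_request_done() from interrupt context when the last command finishes.
 */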

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>

#include <linux/mmc/host.h>
#include <linux/mmc/protocol.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/mach/mmc.h>
#include <asm/arch/board.h>
#include <asm/arch/cpu.h>
#include <asm/arch/gpio.h>
#include <asm/arch/at91_mci.h>
#include <asm/arch/at91_pdc.h>

#define DRIVER_NAME "at91_mci"

#undef SUPPORT_4WIRE

#define FL_SENT_COMMAND	(1 << 0)
#define FL_SENT_STOP	(1 << 1)

#define AT91_MCI_ERRORS	(AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE	\
		| AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE		\
		| AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)

#define at91_mci_read(host, reg)	__raw_readl((host)->baseaddr + (reg))
#define at91_mci_write(host, reg, val)	__raw_writel((val), (host)->baseaddr + (reg))

/*
 * Low level type for this driver
 */
struct at91mci_host
{
	struct mmc_host *mmc;
	struct mmc_command *cmd;
	struct mmc_request *request;

	void __iomem *baseaddr;
	int irq;

	struct at91_mmc_data *board;
	int present;

	struct clk *mci_clk;

	/*
	 * Flag indicating when the command has been sent. This is used to
	 * work out whether or not to send the stop
	 */
	unsigned int flags;
	/* flag for current bus settings */
	u32 bus_mode;

	/* DMA buffer used for transmitting */
	unsigned int *buffer;
	dma_addr_t physical_address;
	unsigned int total_length;

	/* Latest in the scatterlist that has been enabled for transfer, but not freed */
	int in_use_index;

	/* Latest in the scatterlist that has been enabled for transfer */
	int transfer_index;
};

/*
 * Copy from sg to a dma block - used for transfers
 */
static inline void at91mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
{
	unsigned int len, i, size;
	unsigned *dmabuf = host->buffer;

	size = host->total_length;
	len = data->sg_len;

	/*
	 * Just loop through all entries. Size might not
	 * be the entire list though so make sure that
	 * we do not transfer too much.
	 */
	for (i = 0; i < len; i++) {
		struct scatterlist *sg;
		int amount;
		unsigned int *sgbuffer;

		sg = &data->sg[i];

		sgbuffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
		amount = min(size, sg->length);
		size -= amount;

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			int index;

			for (index = 0; index < (amount / 4); index++)
				*dmabuf++ = swab32(sgbuffer[index]);
		} else {
			char *tmpv = (char *)dmabuf;

			/* advance past the copied data so the next sg entry
			 * does not overwrite it */
			memcpy(tmpv, sgbuffer, amount);
			tmpv += amount;
			dmabuf = (unsigned *)tmpv;
		}

		kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);

		if (size == 0)
			break;
	}

	/*
	 * Check that we didn't get a request to transfer
	 * more data than can fit into the SG list.
	 */
	BUG_ON(size != 0);
}
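
/*
 * The AT91RM9200 errata handling above (and the matching swap in
 * at91mci_post_dma_read() below) relies on swab32() reversing the byte
 * order of each 32-bit word, e.g. swab32(0x12345678) == 0x78563412:
 * outgoing data is pre-swapped before the PDC transmits it, and incoming
 * data is swapped back after the PDC has written it to memory.
 */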

/*
 * Prepare a dma read
 */
static void at91mci_pre_dma_read(struct at91mci_host *host)
{
	int i;
	struct scatterlist *sg;
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("pre dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	for (i = 0; i < 2; i++) {
		/* nothing left to transfer */
		if (host->transfer_index >= data->sg_len) {
			pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
			break;
		}

		/* Check to see if this needs filling */
		if (i == 0) {
			if (at91_mci_read(host, AT91_PDC_RCR) != 0) {
				pr_debug("Transfer active in current\n");
				continue;
			}
		}
		else {
			if (at91_mci_read(host, AT91_PDC_RNCR) != 0) {
				pr_debug("Transfer active in next\n");
				continue;
			}
		}

		/* Setup the next transfer */
		pr_debug("Using transfer index %d\n", host->transfer_index);

		sg = &data->sg[host->transfer_index++];
		pr_debug("sg = %p\n", sg);

		sg->dma_address = dma_map_page(NULL, sg->page, sg->offset, sg->length, DMA_FROM_DEVICE);

		pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);

		if (i == 0) {
			at91_mci_write(host, AT91_PDC_RPR, sg->dma_address);
			at91_mci_write(host, AT91_PDC_RCR, sg->length / 4);
		}
		else {
			at91_mci_write(host, AT91_PDC_RNPR, sg->dma_address);
			at91_mci_write(host, AT91_PDC_RNCR, sg->length / 4);
		}
	}

	pr_debug("pre dma read done\n");
}

/*
 * Handle after a dma read
 */
static void at91mci_post_dma_read(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("post dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	while (host->in_use_index < host->transfer_index) {
		unsigned int *buffer;
		struct scatterlist *sg;

		pr_debug("finishing index %d\n", host->in_use_index);

		sg = &data->sg[host->in_use_index++];

		pr_debug("Unmapping page %08X\n", sg->dma_address);

		dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);

		/* Swap the contents of the buffer */
		buffer = kmap_atomic(sg->page, KM_BIO_SRC_IRQ) + sg->offset;
		pr_debug("buffer = %p, length = %d\n", buffer, sg->length);

		data->bytes_xfered += sg->length;

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			int index;

			for (index = 0; index < (sg->length / 4); index++)
				buffer[index] = swab32(buffer[index]);
		}

		kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
		flush_dcache_page(sg->page);
	}

	/* Is there another transfer to trigger? */
	if (host->transfer_index < data->sg_len)
		at91mci_pre_dma_read(host);
	else {
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
		at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS);
	}

	pr_debug("post dma read done\n");
}

/*
 * Handle transmitted data
 */
static void at91_mci_handle_transmitted(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("Handling the transmit\n");

	/* Disable the transfer */
	at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS);

	/* Now wait for cmd ready */
	at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE);
	at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);

	cmd = host->cmd;
	if (!cmd) return;

	data = cmd->data;
	if (!data) return;

	data->bytes_xfered = host->total_length;
}

/*
 * Enable the controller
 */
static void at91_mci_enable(struct at91mci_host *host)
{
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
	at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
	at91_mci_write(host, AT91_MCI_MR, AT91_MCI_PDCMODE | 0x34a);

	/* use Slot A or B (only one at a time) */
	at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b);
}

/*
 * Disable the controller
 */
static void at91_mci_disable(struct at91mci_host *host)
{
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
}

/*
 * Send a command
 * return the interrupts to enable
 */
static unsigned int at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
{
	unsigned int cmdr, mr;
	unsigned int block_length;
	struct mmc_data *data = cmd->data;

	unsigned int blocks;
	unsigned int ier = 0;

	host->cmd = cmd;

	/* Not sure if this is needed */
#if 0
	if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
		pr_debug("Clearing timeout\n");
		at91_mci_write(host, AT91_MCI_ARGR, 0);
		at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
		while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
			/* spin */
			pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
		}
	}
#endif
	cmdr = cmd->opcode;

	if (mmc_resp_type(cmd) == MMC_RSP_NONE)
		cmdr |= AT91_MCI_RSPTYP_NONE;
	else {
		/* if a response is expected then allow maximum response latency */
		cmdr |= AT91_MCI_MAXLAT;
		/* set 136 bit response for R2, 48 bit response otherwise */
		if (mmc_resp_type(cmd) == MMC_RSP_R2)
			cmdr |= AT91_MCI_RSPTYP_136;
		else
			cmdr |= AT91_MCI_RSPTYP_48;
	}

	if (data) {
		block_length = data->blksz;
		blocks = data->blocks;

		/* always set data start - also set direction flag for read */
		if (data->flags & MMC_DATA_READ)
			cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
		else if (data->flags & MMC_DATA_WRITE)
			cmdr |= AT91_MCI_TRCMD_START;

		if (data->flags & MMC_DATA_STREAM)
			cmdr |= AT91_MCI_TRTYP_STREAM;
		if (data->flags & MMC_DATA_MULTI)
			cmdr |= AT91_MCI_TRTYP_MULTIPLE;
	}
	else {
		block_length = 0;
		blocks = 0;
	}

	if (cmd->opcode == MMC_STOP_TRANSMISSION)
		cmdr |= AT91_MCI_TRCMD_STOP;

	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdr |= AT91_MCI_OPDCMD;

	/*
	 * Set the arguments and send the command
	 */
	pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n",
		cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));

	if (!data) {
		at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_TXTDIS | AT91_PDC_RXTDIS);
		at91_mci_write(host, AT91_PDC_RPR, 0);
		at91_mci_write(host, AT91_PDC_RCR, 0);
		at91_mci_write(host, AT91_PDC_RNPR, 0);
		at91_mci_write(host, AT91_PDC_RNCR, 0);
		at91_mci_write(host, AT91_PDC_TPR, 0);
		at91_mci_write(host, AT91_PDC_TCR, 0);
		at91_mci_write(host, AT91_PDC_TNPR, 0);
		at91_mci_write(host, AT91_PDC_TNCR, 0);

		at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
		at91_mci_write(host, AT91_MCI_CMDR, cmdr);
		return AT91_MCI_CMDRDY;
	}

	mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff;	/* zero block length and PDC mode */
	at91_mci_write(host, AT91_MCI_MR, mr | (block_length << 16) | AT91_MCI_PDCMODE);
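
	/*
	 * For example, with a typical 512-byte block the block length field
	 * written above is 512 << 16 == 0x02000000; the 0x7fff mask keeps the
	 * clock divider bits while clearing the previous block length and
	 * PDCMODE before both are rewritten.
	 */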

	/*
	 * Disable the PDC controller
	 */
	at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_RXTDIS | AT91_PDC_TXTDIS);

	if (cmdr & AT91_MCI_TRCMD_START) {
		data->bytes_xfered = 0;
		host->transfer_index = 0;
		host->in_use_index = 0;
		if (cmdr & AT91_MCI_TRDIR) {
			/*
			 * Handle a read
			 */
			host->buffer = NULL;
			host->total_length = 0;

			at91mci_pre_dma_read(host);
			ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
		}
		else {
			/*
			 * Handle a write
			 */
			host->total_length = block_length * blocks;
			host->buffer = dma_alloc_coherent(NULL,
				host->total_length,
				&host->physical_address, GFP_KERNEL);

			at91mci_sg_to_dma(host, data);

			pr_debug("Transmitting %d bytes\n", host->total_length);

			at91_mci_write(host, AT91_PDC_TPR, host->physical_address);
			at91_mci_write(host, AT91_PDC_TCR, host->total_length / 4);
			ier = AT91_MCI_TXBUFE;
		}
	}

	/*
	 * Send the command and then enable the PDC - not the other way round as
	 * the data sheet says
	 */

	at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
	at91_mci_write(host, AT91_MCI_CMDR, cmdr);

	if (cmdr & AT91_MCI_TRCMD_START) {
		if (cmdr & AT91_MCI_TRDIR)
			at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_RXTEN);
		else
			at91_mci_write(host, AT91_PDC_PTCR, AT91_PDC_TXTEN);
	}
	return ier;
}

/*
 * Wait for a command to complete
 */
static void at91mci_process_command(struct at91mci_host *host, struct mmc_command *cmd)
{
	unsigned int ier;

	ier = at91_mci_send_command(host, cmd);

	pr_debug("setting ier to %08X\n", ier);

	/* Stop on errors or the required value */
	at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier);
}

/*
 * Process the next step in the request
 */
static void at91mci_process_next(struct at91mci_host *host)
{
	if (!(host->flags & FL_SENT_COMMAND)) {
		host->flags |= FL_SENT_COMMAND;
		at91mci_process_command(host, host->request->cmd);
	}
	else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
		host->flags |= FL_SENT_STOP;
		at91mci_process_command(host, host->request->stop);
	}
	else
		mmc_request_done(host->mmc, host->request);
}
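
/*
 * Request sequencing implemented by the two helpers above: at91_mci_request()
 * clears host->flags and calls at91mci_process_next(), which issues the main
 * command and sets FL_SENT_COMMAND.  When the completion interrupt arrives,
 * at91mci_completed_command() calls at91mci_process_next() again to issue the
 * stop command (if any) and set FL_SENT_STOP; the following completion ends
 * the request with mmc_request_done().
 */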

/*
 * Handle a command that has been completed
 */
static void at91mci_completed_command(struct at91mci_host *host)
{
	struct mmc_command *cmd = host->cmd;
	unsigned int status;

	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);

	cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
	cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
	cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
	cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));

	if (host->buffer) {
		dma_free_coherent(NULL, host->total_length, host->buffer, host->physical_address);
		host->buffer = NULL;
	}

	status = at91_mci_read(host, AT91_MCI_SR);

	pr_debug("Status = %08X [%08X %08X %08X %08X]\n",
		 status, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);

	if (status & (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE |
			AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE |
			AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)) {
		if ((status & AT91_MCI_RCRCE) &&
			((cmd->opcode == MMC_SEND_OP_COND) || (cmd->opcode == SD_APP_OP_COND))) {
			cmd->error = MMC_ERR_NONE;
		}
		else {
			if (status & (AT91_MCI_RTOE | AT91_MCI_DTOE))
				cmd->error = MMC_ERR_TIMEOUT;
			else if (status & (AT91_MCI_RCRCE | AT91_MCI_DCRCE))
				cmd->error = MMC_ERR_BADCRC;
			else if (status & (AT91_MCI_OVRE | AT91_MCI_UNRE))
				cmd->error = MMC_ERR_FIFO;
			else
				cmd->error = MMC_ERR_FAILED;

			pr_debug("Error detected and set to %d (cmd = %d, retries = %d)\n",
				 cmd->error, cmd->opcode, cmd->retries);
		}
	}
	else
		cmd->error = MMC_ERR_NONE;

	at91mci_process_next(host);
}

/*
 * Handle an MMC request
 */
static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct at91mci_host *host = mmc_priv(mmc);
	host->request = mrq;
	host->flags = 0;

	at91mci_process_next(host);
}

/*
 * Set the IOS
 */
static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int clkdiv;
	struct at91mci_host *host = mmc_priv(mmc);
	unsigned long at91_master_clock = clk_get_rate(host->mci_clk);

	host->bus_mode = ios->bus_mode;

	if (ios->clock == 0) {
		/* Disable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS);
		clkdiv = 0;
	}
	else {
		/* Enable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);

		if ((at91_master_clock % (ios->clock * 2)) == 0)
			clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
		else
			clkdiv = (at91_master_clock / ios->clock) / 2;

		pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
			at91_master_clock / (2 * (clkdiv + 1)));
	}
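
	/*
	 * Worked example with illustrative numbers: for a 60 MHz master clock
	 * and a requested 25 MHz card clock, 60000000 % 50000000 != 0, so
	 * clkdiv = (60000000 / 25000000) / 2 = 1 and the card clock becomes
	 * 60 MHz / (2 * (1 + 1)) = 15 MHz; the divider is always rounded so
	 * that the card clock does not exceed the requested rate.
	 */
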
	if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
		pr_debug("MMC: Setting controller bus width to 4\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
	}
	else {
		pr_debug("MMC: Setting controller bus width to 1\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
	}

	/* Set the clock divider */
	at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);

	/* maybe switch power to the card */
	if (host->board->vcc_pin) {
		switch (ios->power_mode) {
			case MMC_POWER_OFF:
				at91_set_gpio_value(host->board->vcc_pin, 0);
				break;
			case MMC_POWER_UP:
			case MMC_POWER_ON:
				at91_set_gpio_value(host->board->vcc_pin, 1);
				break;
		}
	}
}

/*
 * Handle an interrupt
 */
static irqreturn_t at91_mci_irq(int irq, void *devid)
{
	struct at91mci_host *host = devid;
	int completed = 0;
	unsigned int int_status, int_mask;

	int_status = at91_mci_read(host, AT91_MCI_SR);
	int_mask = at91_mci_read(host, AT91_MCI_IMR);

	pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask,
		int_status & int_mask);

	int_status = int_status & int_mask;

	if (int_status & AT91_MCI_ERRORS) {
		completed = 1;

		if (int_status & AT91_MCI_UNRE)
			pr_debug("MMC: Underrun error\n");
		if (int_status & AT91_MCI_OVRE)
			pr_debug("MMC: Overrun error\n");
		if (int_status & AT91_MCI_DTOE)
			pr_debug("MMC: Data timeout\n");
		if (int_status & AT91_MCI_DCRCE)
			pr_debug("MMC: CRC error in data\n");
		if (int_status & AT91_MCI_RTOE)
			pr_debug("MMC: Response timeout\n");
		if (int_status & AT91_MCI_RENDE)
			pr_debug("MMC: Response end bit error\n");
		if (int_status & AT91_MCI_RCRCE)
			pr_debug("MMC: Response CRC error\n");
		if (int_status & AT91_MCI_RDIRE)
			pr_debug("MMC: Response direction error\n");
		if (int_status & AT91_MCI_RINDE)
			pr_debug("MMC: Response index error\n");
	} else {
		/* Only continue processing if no errors */

		if (int_status & AT91_MCI_TXBUFE) {
			pr_debug("TX buffer empty\n");
			at91_mci_handle_transmitted(host);
		}

		if (int_status & AT91_MCI_RXBUFF) {
			pr_debug("RX buffer full\n");
			at91_mci_write(host, AT91_MCI_IER, AT91_MCI_CMDRDY);
		}

		if (int_status & AT91_MCI_ENDTX)
			pr_debug("Transmit has ended\n");

		if (int_status & AT91_MCI_ENDRX) {
			pr_debug("Receive has ended\n");
			at91mci_post_dma_read(host);
		}

		if (int_status & AT91_MCI_NOTBUSY) {
			pr_debug("Card is ready\n");
			at91_mci_write(host, AT91_MCI_IER, AT91_MCI_CMDRDY);
		}

		if (int_status & AT91_MCI_DTIP)
			pr_debug("Data transfer in progress\n");

		if (int_status & AT91_MCI_BLKE)
			pr_debug("Block transfer has ended\n");

		if (int_status & AT91_MCI_TXRDY)
			pr_debug("Ready to transmit\n");

		if (int_status & AT91_MCI_RXRDY)
			pr_debug("Ready to receive\n");

		if (int_status & AT91_MCI_CMDRDY) {
			pr_debug("Command ready\n");
			completed = 1;
		}
	}

	if (completed) {
		pr_debug("Completed command\n");
		at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
		at91mci_completed_command(host);
	} else
		at91_mci_write(host, AT91_MCI_IDR, int_status);

	return IRQ_HANDLED;
}
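
/*
 * Taken together with the helpers above, the interrupt sequence for a read
 * is ENDRX (at91mci_post_dma_read() re-arms or finishes the PDC), RXBUFF
 * (enable CMDRDY) and finally CMDRDY (complete the command); for a write it
 * is TXBUFE (at91_mci_handle_transmitted() enables NOTBUSY), NOTBUSY (enable
 * CMDRDY) and CMDRDY.  Any error bit goes straight to command completion.
 */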

static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
{
	struct at91mci_host *host = _host;
	int present = !at91_get_gpio_value(irq);

	/*
	 * we expect this irq on both insert and remove,
	 * and use a short delay to debounce.
	 */
	if (present != host->present) {
		host->present = present;
		pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
			present ? "insert" : "remove");
		if (!present) {
			pr_debug("****** Resetting SD-card bus width ******\n");
			at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
		}
		mmc_detect_change(host->mmc, msecs_to_jiffies(100));
	}
	return IRQ_HANDLED;
}

static int at91_mci_get_ro(struct mmc_host *mmc)
{
	int read_only = 0;
	struct at91mci_host *host = mmc_priv(mmc);

	if (host->board->wp_pin) {
		read_only = at91_get_gpio_value(host->board->wp_pin);
		printk(KERN_WARNING "%s: card is %s\n", mmc_hostname(mmc),
				(read_only ? "read-only" : "read-write"));
	}
	else {
		printk(KERN_WARNING "%s: host does not support reading read-only "
				"switch. Assuming write-enable.\n", mmc_hostname(mmc));
	}
	return read_only;
}

static const struct mmc_host_ops at91_mci_ops = {
	.request	= at91_mci_request,
	.set_ios	= at91_mci_set_ios,
	.get_ro		= at91_mci_get_ro,
};

/*
 * Probe for the device
 */
static int at91_mci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct at91mci_host *host;
	struct resource *res;
	int ret;

	pr_debug("Probe MCI devices\n");

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
	if (!mmc) {
		pr_debug("Failed to allocate mmc host\n");
		release_mem_region(res->start, res->end - res->start + 1);
		return -ENOMEM;
	}

	mmc->ops = &at91_mci_ops;
	mmc->f_min = 375000;
	mmc->f_max = 25000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = MMC_CAP_BYTEBLOCK;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->buffer = NULL;
	host->bus_mode = 0;
	host->board = pdev->dev.platform_data;
	if (host->board->wire4) {
#ifdef SUPPORT_4WIRE
		mmc->caps |= MMC_CAP_4_BIT_DATA;
#else
		printk(KERN_WARNING "AT91 MMC: 4 wire bus mode not supported by this driver - using 1 wire\n");
#endif
	}

	/*
	 * Get Clock
	 */
	host->mci_clk = clk_get(&pdev->dev, "mci_clk");
	if (IS_ERR(host->mci_clk)) {
		printk(KERN_ERR "AT91 MMC: no clock defined.\n");
		mmc_free_host(mmc);
		release_mem_region(res->start, res->end - res->start + 1);
		return -ENODEV;
	}

	/*
	 * Map I/O region
	 */
	host->baseaddr = ioremap(res->start, res->end - res->start + 1);
	if (!host->baseaddr) {
		clk_put(host->mci_clk);
		mmc_free_host(mmc);
		release_mem_region(res->start, res->end - res->start + 1);
		return -ENOMEM;
	}

	/*
	 * Reset hardware
	 */
	clk_enable(host->mci_clk);		/* Enable the peripheral clock */
	at91_mci_disable(host);
	at91_mci_enable(host);

	/*
	 * Allocate the MCI interrupt
	 */
	host->irq = platform_get_irq(pdev, 0);
	ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED, DRIVER_NAME, host);
	if (ret) {
		printk(KERN_ERR "AT91 MMC: Failed to request MCI interrupt\n");
		clk_disable(host->mci_clk);
		clk_put(host->mci_clk);
		mmc_free_host(mmc);
		iounmap(host->baseaddr);
		release_mem_region(res->start, res->end - res->start + 1);
		return ret;
	}

	platform_set_drvdata(pdev, mmc);

	/*
	 * Add host to MMC layer
	 */
	if (host->board->det_pin)
		host->present = !at91_get_gpio_value(host->board->det_pin);
	else
		host->present = -1;

	mmc_add_host(mmc);

	/*
	 * monitor card insertion/removal if we can
	 */
	if (host->board->det_pin) {
		ret = request_irq(host->board->det_pin, at91_mmc_det_irq,
				0, DRIVER_NAME, host);
		if (ret)
			printk(KERN_ERR "AT91 MMC: Couldn't allocate MMC detect irq\n");
	}

	pr_debug("Added MCI driver\n");

	return 0;
}

/*
 * Remove a device
 */
static int at91_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host;
	struct resource *res;

	if (!mmc)
		return -1;

	host = mmc_priv(mmc);

	if (host->present != -1) {
		free_irq(host->board->det_pin, host);
		cancel_delayed_work(&host->mmc->detect);
	}

	at91_mci_disable(host);
	mmc_remove_host(mmc);
	free_irq(host->irq, host);

	clk_disable(host->mci_clk);		/* Disable the peripheral clock */
	clk_put(host->mci_clk);

	iounmap(host->baseaddr);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, res->end - res->start + 1);

	mmc_free_host(mmc);
	platform_set_drvdata(pdev, NULL);
	pr_debug("MCI Removed\n");

	return 0;
}

#ifdef CONFIG_PM
static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	int ret = 0;

	if (mmc)
		ret = mmc_suspend_host(mmc, state);

	return ret;
}

static int at91_mci_resume(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	int ret = 0;

	if (mmc)
		ret = mmc_resume_host(mmc);

	return ret;
}
#else
#define at91_mci_suspend	NULL
#define at91_mci_resume		NULL
#endif

static struct platform_driver at91_mci_driver = {
	.probe		= at91_mci_probe,
	.remove		= at91_mci_remove,
	.suspend	= at91_mci_suspend,
	.resume		= at91_mci_resume,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init at91_mci_init(void)
{
	return platform_driver_register(&at91_mci_driver);
}

static void __exit at91_mci_exit(void)
{
	platform_driver_unregister(&at91_mci_driver);
}

module_init(at91_mci_init);
module_exit(at91_mci_exit);

MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
MODULE_AUTHOR("Nick Randell");
MODULE_LICENSE("GPL");