[MMC] Add MMC command type flags
1 /*
2 * linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver
3 *
4 * Copyright (C) 2004-2005 Pierre Ossman, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 *
11 * Warning!
12 *
13 * Changes to the FIFO system should be done with extreme care since
14 * the hardware is full of bugs related to the FIFO. Known issues are:
15 *
16 * - FIFO size field in FSR is always zero.
17 *
18 * - FIFO interrupts tend not to work as they should. Interrupts are
19 * triggered only for full/empty events, not for threshold values.
20 *
21 * - On APIC systems the FIFO empty interrupt is sometimes lost.
22 */
23
24 #include <linux/config.h>
25 #include <linux/module.h>
26 #include <linux/moduleparam.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/platform_device.h>
30 #include <linux/interrupt.h>
31 #include <linux/dma-mapping.h>
32 #include <linux/delay.h>
33 #include <linux/pnp.h>
34 #include <linux/highmem.h>
35 #include <linux/mmc/host.h>
36 #include <linux/mmc/protocol.h>
37
38 #include <asm/io.h>
39 #include <asm/dma.h>
40 #include <asm/scatterlist.h>
41
42 #include "wbsd.h"
43
44 #define DRIVER_NAME "wbsd"
45 #define DRIVER_VERSION "1.5"
46
47 #ifdef CONFIG_MMC_DEBUG
48 #define DBG(x...) \
49 printk(KERN_DEBUG DRIVER_NAME ": " x)
50 #define DBGF(f, x...) \
51 printk(KERN_DEBUG DRIVER_NAME " [%s()]: " f, __func__ , ##x)
52 #else
53 #define DBG(x...) do { } while (0)
54 #define DBGF(x...) do { } while (0)
55 #endif
56
57 /*
58 * Device resources
59 */
60
61 #ifdef CONFIG_PNP
62
63 static const struct pnp_device_id pnp_dev_table[] = {
64 { "WEC0517", 0 },
65 { "WEC0518", 0 },
66 { "", 0 },
67 };
68
69 MODULE_DEVICE_TABLE(pnp, pnp_dev_table);
70
71 #endif /* CONFIG_PNP */
72
73 static const int config_ports[] = { 0x2E, 0x4E };
74 static const int unlock_codes[] = { 0x83, 0x87 };
75
76 static const int valid_ids[] = {
77 0x7112,
78 };
79
80 #ifdef CONFIG_PNP
81 static unsigned int nopnp = 0;
82 #else
83 static const unsigned int nopnp = 1;
84 #endif
85 static unsigned int io = 0x248;
86 static unsigned int irq = 6;
87 static int dma = 2;
88
89 /*
90 * Basic functions
91 */
92
93 static inline void wbsd_unlock_config(struct wbsd_host *host)
94 {
95 BUG_ON(host->config == 0);
96
97 outb(host->unlock_code, host->config);
98 outb(host->unlock_code, host->config);
99 }
100
101 static inline void wbsd_lock_config(struct wbsd_host *host)
102 {
103 BUG_ON(host->config == 0);
104
105 outb(LOCK_CODE, host->config);
106 }
107
108 static inline void wbsd_write_config(struct wbsd_host *host, u8 reg, u8 value)
109 {
110 BUG_ON(host->config == 0);
111
112 outb(reg, host->config);
113 outb(value, host->config + 1);
114 }
115
116 static inline u8 wbsd_read_config(struct wbsd_host *host, u8 reg)
117 {
118 BUG_ON(host->config == 0);
119
120 outb(reg, host->config);
121 return inb(host->config + 1);
122 }
123
124 static inline void wbsd_write_index(struct wbsd_host *host, u8 index, u8 value)
125 {
126 outb(index, host->base + WBSD_IDXR);
127 outb(value, host->base + WBSD_DATAR);
128 }
129
130 static inline u8 wbsd_read_index(struct wbsd_host *host, u8 index)
131 {
132 outb(index, host->base + WBSD_IDXR);
133 return inb(host->base + WBSD_DATAR);
134 }
135
136 /*
137 * Common routines
138 */
139
140 static void wbsd_init_device(struct wbsd_host *host)
141 {
142 u8 setup, ier;
143
144 /*
145 * Reset chip (SD/MMC part) and fifo.
146 */
147 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
148 setup |= WBSD_FIFO_RESET | WBSD_SOFT_RESET;
149 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
150
151 /*
152 * Set DAT3 to input
153 */
154 setup &= ~WBSD_DAT3_H;
155 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
156 host->flags &= ~WBSD_FIGNORE_DETECT;
157
158 /*
159 * Read back default clock.
160 */
161 host->clk = wbsd_read_index(host, WBSD_IDX_CLK);
162
163 /*
164 * Power down port.
165 */
166 outb(WBSD_POWER_N, host->base + WBSD_CSR);
167
168 /*
169 * Set maximum timeout.
170 */
171 wbsd_write_index(host, WBSD_IDX_TAAC, 0x7F);
172
173 /*
174 * Test for card presence
175 */
176 if (inb(host->base + WBSD_CSR) & WBSD_CARDPRESENT)
177 host->flags |= WBSD_FCARD_PRESENT;
178 else
179 host->flags &= ~WBSD_FCARD_PRESENT;
180
181 /*
182 * Enable interesting interrupts.
183 */
184 ier = 0;
185 ier |= WBSD_EINT_CARD;
186 ier |= WBSD_EINT_FIFO_THRE;
187 ier |= WBSD_EINT_CCRC;
188 ier |= WBSD_EINT_TIMEOUT;
189 ier |= WBSD_EINT_CRC;
190 ier |= WBSD_EINT_TC;
191
192 outb(ier, host->base + WBSD_EIR);
193
194 /*
195 * Clear interrupts.
196 */
197 inb(host->base + WBSD_ISR);
198 }
199
200 static void wbsd_reset(struct wbsd_host *host)
201 {
202 u8 setup;
203
204 printk(KERN_ERR "%s: Resetting chip\n", mmc_hostname(host->mmc));
205
206 /*
207 * Soft reset of chip (SD/MMC part).
208 */
209 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
210 setup |= WBSD_SOFT_RESET;
211 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
212 }
213
214 static void wbsd_request_end(struct wbsd_host *host, struct mmc_request *mrq)
215 {
216 unsigned long dmaflags;
217
218 DBGF("Ending request, cmd (%x)\n", mrq->cmd->opcode);
219
220 if (host->dma >= 0) {
221 /*
222 * Release ISA DMA controller.
223 */
224 dmaflags = claim_dma_lock();
225 disable_dma(host->dma);
226 clear_dma_ff(host->dma);
227 release_dma_lock(dmaflags);
228
229 /*
230 * Disable DMA on host.
231 */
232 wbsd_write_index(host, WBSD_IDX_DMA, 0);
233 }
234
235 host->mrq = NULL;
236
237 /*
238 * MMC layer might call back into the driver so first unlock.
239 */
240 spin_unlock(&host->lock);
241 mmc_request_done(host->mmc, mrq);
242 spin_lock(&host->lock);
243 }
244
245 /*
246 * Scatter/gather functions
247 */
248
249 static inline void wbsd_init_sg(struct wbsd_host *host, struct mmc_data *data)
250 {
251 /*
252 * Get info about the SG list from the data structure.
253 */
254 host->cur_sg = data->sg;
255 host->num_sg = data->sg_len;
256
257 host->offset = 0;
258 host->remain = host->cur_sg->length;
259 }
260
261 static inline int wbsd_next_sg(struct wbsd_host *host)
262 {
263 /*
264 * Skip to next SG entry.
265 */
266 host->cur_sg++;
267 host->num_sg--;
268
269 /*
270 * Any entries left?
271 */
272 if (host->num_sg > 0) {
273 host->offset = 0;
274 host->remain = host->cur_sg->length;
275 }
276
277 return host->num_sg;
278 }
279
280 static inline char *wbsd_kmap_sg(struct wbsd_host *host)
281 {
282 host->mapped_sg = kmap_atomic(host->cur_sg->page, KM_BIO_SRC_IRQ) +
283 host->cur_sg->offset;
284 return host->mapped_sg;
285 }
286
287 static inline void wbsd_kunmap_sg(struct wbsd_host *host)
288 {
289 kunmap_atomic(host->mapped_sg, KM_BIO_SRC_IRQ);
290 }
291
292 static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
293 {
294 unsigned int len, i, size;
295 struct scatterlist *sg;
296 char *dmabuf = host->dma_buffer;
297 char *sgbuf;
298
299 size = host->size;
300
301 sg = data->sg;
302 len = data->sg_len;
303
304 /*
305 * Just loop through all entries. The size might not
306 * cover the entire list, so make sure that we do
307 * not transfer too much.
308 */
309 for (i = 0; i < len; i++) {
310 sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset;
311 if (size < sg[i].length)
312 memcpy(dmabuf, sgbuf, size);
313 else
314 memcpy(dmabuf, sgbuf, sg[i].length);
315 kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ);
316 dmabuf += sg[i].length;
317
318 if (size < sg[i].length)
319 size = 0;
320 else
321 size -= sg[i].length;
322
323 if (size == 0)
324 break;
325 }
326
327 /*
328 * Check that we didn't get a request to transfer
329 * more data than can fit into the SG list.
330 */
331
332 BUG_ON(size != 0);
333
334 host->size -= size;
335 }
336
337 static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data)
338 {
339 unsigned int len, i, size;
340 struct scatterlist *sg;
341 char *dmabuf = host->dma_buffer;
342 char *sgbuf;
343
344 size = host->size;
345
346 sg = data->sg;
347 len = data->sg_len;
348
349 /*
350 * Just loop through all entries. The size might not
351 * cover the entire list, so make sure that we do
352 * not transfer too much.
353 */
354 for (i = 0; i < len; i++) {
355 sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset;
356 if (size < sg[i].length)
357 memcpy(sgbuf, dmabuf, size);
358 else
359 memcpy(sgbuf, dmabuf, sg[i].length);
360 kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ);
361 dmabuf += sg[i].length;
362
363 if (size < sg[i].length)
364 size = 0;
365 else
366 size -= sg[i].length;
367
368 if (size == 0)
369 break;
370 }
371
372 /*
373 * Check that we didn't get a request to transfer
374 * more data than can fit into the SG list.
375 */
376
377 BUG_ON(size != 0);
378
379 host->size -= size;
380 }
381
382 /*
383 * Command handling
384 */
385
386 static inline void wbsd_get_short_reply(struct wbsd_host *host,
387 struct mmc_command *cmd)
388 {
389 /*
390 * Correct response type?
391 */
392 if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_SHORT) {
393 cmd->error = MMC_ERR_INVALID;
394 return;
395 }
396
397 cmd->resp[0] = wbsd_read_index(host, WBSD_IDX_RESP12) << 24;
398 cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP13) << 16;
399 cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP14) << 8;
400 cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP15) << 0;
401 cmd->resp[1] = wbsd_read_index(host, WBSD_IDX_RESP16) << 24;
402 }
403
404 static inline void wbsd_get_long_reply(struct wbsd_host *host,
405 struct mmc_command *cmd)
406 {
407 int i;
408
409 /*
410 * Correct response type?
411 */
412 if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_LONG) {
413 cmd->error = MMC_ERR_INVALID;
414 return;
415 }
416
417 for (i = 0; i < 4; i++) {
418 cmd->resp[i] =
419 wbsd_read_index(host, WBSD_IDX_RESP1 + i * 4) << 24;
420 cmd->resp[i] |=
421 wbsd_read_index(host, WBSD_IDX_RESP2 + i * 4) << 16;
422 cmd->resp[i] |=
423 wbsd_read_index(host, WBSD_IDX_RESP3 + i * 4) << 8;
424 cmd->resp[i] |=
425 wbsd_read_index(host, WBSD_IDX_RESP4 + i * 4) << 0;
426 }
427 }
428
429 static void wbsd_send_command(struct wbsd_host *host, struct mmc_command *cmd)
430 {
431 int i;
432 u8 status, isr;
433
434 DBGF("Sending cmd (%x)\n", cmd->opcode);
435
436 /*
437 * Clear accumulated ISR. The interrupt routine
438 * will fill this one with events that occur during
439 * transfer.
440 */
441 host->isr = 0;
442
443 /*
444 * Send the command (CRC calculated by host).
445 */
446 outb(cmd->opcode, host->base + WBSD_CMDR);
447 for (i = 3; i >= 0; i--)
448 outb((cmd->arg >> (i * 8)) & 0xff, host->base + WBSD_CMDR);
449
450 cmd->error = MMC_ERR_NONE;
451
452 /*
453 * Wait for the request to complete.
454 */
455 do {
456 status = wbsd_read_index(host, WBSD_IDX_STATUS);
457 } while (status & WBSD_CARDTRAFFIC);
458
459 /*
460 * Do we expect a reply?
461 */
462 if (cmd->flags & MMC_RSP_PRESENT) {
463 /*
464 * Read back status.
465 */
466 isr = host->isr;
467
468 /* Card removed? */
469 if (isr & WBSD_INT_CARD)
470 cmd->error = MMC_ERR_TIMEOUT;
471 /* Timeout? */
472 else if (isr & WBSD_INT_TIMEOUT)
473 cmd->error = MMC_ERR_TIMEOUT;
474 /* CRC? */
475 else if ((cmd->flags & MMC_RSP_CRC) && (isr & WBSD_INT_CRC))
476 cmd->error = MMC_ERR_BADCRC;
477 /* All ok */
478 else {
479 if (cmd->flags & MMC_RSP_136)
480 wbsd_get_long_reply(host, cmd);
481 else
482 wbsd_get_short_reply(host, cmd);
483 }
484 }
485
486 DBGF("Sent cmd (%x), res %d\n", cmd->opcode, cmd->error);
487 }
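/*
 * Illustrative example, not part of the original driver: the command is
 * serialised as the opcode byte followed by the four argument bytes,
 * most significant first. For a hypothetical CMD17 (READ_SINGLE_BLOCK)
 * with arg = 0x00001000, the bytes written to WBSD_CMDR would be
 * 0x11, 0x00, 0x00, 0x10, 0x00.
 */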
488
489 /*
490 * Data functions
491 */
492
493 static void wbsd_empty_fifo(struct wbsd_host *host)
494 {
495 struct mmc_data *data = host->mrq->cmd->data;
496 char *buffer;
497 int i, fsr, fifo;
498
499 /*
500 * Handle excessive data.
501 */
502 if (data->bytes_xfered == host->size)
503 return;
504
505 buffer = wbsd_kmap_sg(host) + host->offset;
506
507 /*
508 * Drain the fifo. This has a tendency to loop longer
509 * than the FIFO length (usually one block).
510 */
511 while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_EMPTY)) {
512 /*
513 * The size field in the FSR is broken so we have to
514 * do some guessing.
515 */
516 if (fsr & WBSD_FIFO_FULL)
517 fifo = 16;
518 else if (fsr & WBSD_FIFO_FUTHRE)
519 fifo = 8;
520 else
521 fifo = 1;
522
523 for (i = 0; i < fifo; i++) {
524 *buffer = inb(host->base + WBSD_DFR);
525 buffer++;
526 host->offset++;
527 host->remain--;
528
529 data->bytes_xfered++;
530
531 /*
532 * Transfer done?
533 */
534 if (data->bytes_xfered == host->size) {
535 wbsd_kunmap_sg(host);
536 return;
537 }
538
539 /*
540 * End of scatter list entry?
541 */
542 if (host->remain == 0) {
543 wbsd_kunmap_sg(host);
544
545 /*
546 * Get next entry. Check if last.
547 */
548 if (!wbsd_next_sg(host)) {
549 /*
550 * We should never reach this point.
551 * It means that we're trying to
552 * transfer more blocks than can fit
553 * into the scatter list.
554 */
555 BUG_ON(1);
556
557 host->size = data->bytes_xfered;
558
559 return;
560 }
561
562 buffer = wbsd_kmap_sg(host);
563 }
564 }
565 }
566
567 wbsd_kunmap_sg(host);
568
569 /*
570 * This is a very dirty hack to solve a
571 * hardware problem. The chip doesn't trigger
572 * FIFO threshold interrupts properly.
573 */
574 if ((host->size - data->bytes_xfered) < 16)
575 tasklet_schedule(&host->fifo_tasklet);
576 }
577
578 static void wbsd_fill_fifo(struct wbsd_host *host)
579 {
580 struct mmc_data *data = host->mrq->cmd->data;
581 char *buffer;
582 int i, fsr, fifo;
583
584 /*
585 * Check that we aren't being called after the
586 * entire buffer has been transferred.
587 */
588 if (data->bytes_xfered == host->size)
589 return;
590
591 buffer = wbsd_kmap_sg(host) + host->offset;
592
593 /*
594 * Fill the fifo. This has a tendency to loop longer
595 * than the FIFO length (usually one block).
596 */
597 while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_FULL)) {
598 /*
599 * The size field in the FSR is broken so we have to
600 * do some guessing.
601 */
602 if (fsr & WBSD_FIFO_EMPTY)
603 fifo = 0;
604 else if (fsr & WBSD_FIFO_EMTHRE)
605 fifo = 8;
606 else
607 fifo = 15;
608
609 for (i = 16; i > fifo; i--) {
610 outb(*buffer, host->base + WBSD_DFR);
611 buffer++;
612 host->offset++;
613 host->remain--;
614
615 data->bytes_xfered++;
616
617 /*
618 * Transfer done?
619 */
620 if (data->bytes_xfered == host->size) {
621 wbsd_kunmap_sg(host);
622 return;
623 }
624
625 /*
626 * End of scatter list entry?
627 */
628 if (host->remain == 0) {
629 wbsd_kunmap_sg(host);
630
631 /*
632 * Get next entry. Check if last.
633 */
634 if (!wbsd_next_sg(host)) {
635 /*
636 * We should never reach this point.
637 * It means that we're trying to
638 * transfer more blocks than can fit
639 * into the scatter list.
640 */
641 BUG_ON(1);
642
643 host->size = data->bytes_xfered;
644
645 return;
646 }
647
648 buffer = wbsd_kmap_sg(host);
649 }
650 }
651 }
652
653 wbsd_kunmap_sg(host);
654
655 /*
656 * The controller stops sending interrupts for
657 * 'FIFO empty' under certain conditions. So we
658 * need to be a bit more pro-active.
659 */
660 tasklet_schedule(&host->fifo_tasklet);
661 }
662
663 static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data)
664 {
665 u16 blksize;
666 u8 setup;
667 unsigned long dmaflags;
668
669 DBGF("blksz %04x blks %04x flags %08x\n",
670 1 << data->blksz_bits, data->blocks, data->flags);
671 DBGF("tsac %d ms nsac %d clk\n",
672 data->timeout_ns / 1000000, data->timeout_clks);
673
674 /*
675 * Calculate size.
676 */
677 host->size = data->blocks << data->blksz_bits;
678
679 /*
680 * Check timeout values for overflow.
681 * (Yes, some cards cause this value to overflow).
682 */
683 if (data->timeout_ns > 127000000)
684 wbsd_write_index(host, WBSD_IDX_TAAC, 127);
685 else {
686 wbsd_write_index(host, WBSD_IDX_TAAC,
687 data->timeout_ns / 1000000);
688 }
689
690 if (data->timeout_clks > 255)
691 wbsd_write_index(host, WBSD_IDX_NSAC, 255);
692 else
693 wbsd_write_index(host, WBSD_IDX_NSAC, data->timeout_clks);
694
695 /*
696 * Inform the chip of how large blocks will be
697 * sent. It needs this to determine when to
698 * calculate CRC.
699 *
700 * Space for CRC must be included in the size.
701 * Two bytes are needed for each data line.
702 */
703 if (host->bus_width == MMC_BUS_WIDTH_1) {
704 blksize = (1 << data->blksz_bits) + 2;
705
706 wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0);
707 wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
708 } else if (host->bus_width == MMC_BUS_WIDTH_4) {
709 blksize = (1 << data->blksz_bits) + 2 * 4;
710
711 wbsd_write_index(host, WBSD_IDX_PBSMSB,
712 ((blksize >> 4) & 0xF0) | WBSD_DATA_WIDTH);
713 wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
714 } else {
715 data->error = MMC_ERR_INVALID;
716 return;
717 }
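/*
 * Worked example with hypothetical values, not part of the original
 * driver: for 512-byte blocks the register value would be
 * 512 + 2 = 514 on a 1-bit bus, or 512 + 2 * 4 = 520 on a 4-bit bus,
 * since the chip reserves two CRC bytes per active DAT line.
 */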
718
719 /*
720 * Clear the FIFO. This is needed even for DMA
721 * transfers since the chip still uses the FIFO
722 * internally.
723 */
724 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
725 setup |= WBSD_FIFO_RESET;
726 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
727
728 /*
729 * DMA transfer?
730 */
731 if (host->dma >= 0) {
732 /*
733 * The buffer for DMA is only 64 kB.
734 */
735 BUG_ON(host->size > 0x10000);
736 if (host->size > 0x10000) {
737 data->error = MMC_ERR_INVALID;
738 return;
739 }
740
741 /*
742 * Transfer data from the SG list to
743 * the DMA buffer.
744 */
745 if (data->flags & MMC_DATA_WRITE)
746 wbsd_sg_to_dma(host, data);
747
748 /*
749 * Initialise the ISA DMA controller.
750 */
751 dmaflags = claim_dma_lock();
752 disable_dma(host->dma);
753 clear_dma_ff(host->dma);
754 if (data->flags & MMC_DATA_READ)
755 set_dma_mode(host->dma, DMA_MODE_READ & ~0x40);
756 else
757 set_dma_mode(host->dma, DMA_MODE_WRITE & ~0x40);
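/*
 * Editorial reading of the ~0x40 mask (an interpretation, not stated in
 * the original source): DMA_MODE_READ/WRITE normally select
 * single-transfer mode via bit 6 of the 8237 mode register; clearing
 * that bit leaves the channel in demand mode, which presumably lets the
 * controller hold DREQ for back-to-back transfers.
 */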
758 set_dma_addr(host->dma, host->dma_addr);
759 set_dma_count(host->dma, host->size);
760
761 enable_dma(host->dma);
762 release_dma_lock(dmaflags);
763
764 /*
765 * Enable DMA on the host.
766 */
767 wbsd_write_index(host, WBSD_IDX_DMA, WBSD_DMA_ENABLE);
768 } else {
769 /*
770 * This flag is used to keep printk
771 * output to a minimum.
772 */
773 host->firsterr = 1;
774
775 /*
776 * Initialise the SG list.
777 */
778 wbsd_init_sg(host, data);
779
780 /*
781 * Turn off DMA.
782 */
783 wbsd_write_index(host, WBSD_IDX_DMA, 0);
784
785 /*
786 * Set up FIFO threshold levels (and fill
787 * buffer if doing a write).
788 */
789 if (data->flags & MMC_DATA_READ) {
790 wbsd_write_index(host, WBSD_IDX_FIFOEN,
791 WBSD_FIFOEN_FULL | 8);
792 } else {
793 wbsd_write_index(host, WBSD_IDX_FIFOEN,
794 WBSD_FIFOEN_EMPTY | 8);
795 wbsd_fill_fifo(host);
796 }
797 }
798
799 data->error = MMC_ERR_NONE;
800 }
801
802 static void wbsd_finish_data(struct wbsd_host *host, struct mmc_data *data)
803 {
804 unsigned long dmaflags;
805 int count;
806 u8 status;
807
808 WARN_ON(host->mrq == NULL);
809
810 /*
811 * Send a stop command if needed.
812 */
813 if (data->stop)
814 wbsd_send_command(host, data->stop);
815
816 /*
817 * Wait for the controller to leave data
818 * transfer state.
819 */
820 do {
821 status = wbsd_read_index(host, WBSD_IDX_STATUS);
822 } while (status & (WBSD_BLOCK_READ | WBSD_BLOCK_WRITE));
823
824 /*
825 * DMA transfer?
826 */
827 if (host->dma >= 0) {
828 /*
829 * Disable DMA on the host.
830 */
831 wbsd_write_index(host, WBSD_IDX_DMA, 0);
832
833 /*
834 * Turn off ISA DMA controller.
835 */
836 dmaflags = claim_dma_lock();
837 disable_dma(host->dma);
838 clear_dma_ff(host->dma);
839 count = get_dma_residue(host->dma);
840 release_dma_lock(dmaflags);
841
842 /*
843 * Any leftover data?
844 */
845 if (count) {
846 printk(KERN_ERR "%s: Incomplete DMA transfer. "
847 "%d bytes left.\n",
848 mmc_hostname(host->mmc), count);
849
850 data->error = MMC_ERR_FAILED;
851 } else {
852 /*
853 * Transfer data from DMA buffer to
854 * SG list.
855 */
856 if (data->flags & MMC_DATA_READ)
857 wbsd_dma_to_sg(host, data);
858
859 data->bytes_xfered = host->size;
860 }
861 }
862
863 DBGF("Ending data transfer (%d bytes)\n", data->bytes_xfered);
864
865 wbsd_request_end(host, host->mrq);
866 }
867
868 /*****************************************************************************\
869 * *
870 * MMC layer callbacks *
871 * *
872 \*****************************************************************************/
873
874 static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
875 {
876 struct wbsd_host *host = mmc_priv(mmc);
877 struct mmc_command *cmd;
878
879 /*
880 * Disable tasklets to avoid a deadlock.
881 */
882 spin_lock_bh(&host->lock);
883
884 BUG_ON(host->mrq != NULL);
885
886 cmd = mrq->cmd;
887
888 host->mrq = mrq;
889
890 /*
891 * If there is no card in the slot then
892 * time out immediately.
893 */
894 if (!(host->flags & WBSD_FCARD_PRESENT)) {
895 cmd->error = MMC_ERR_TIMEOUT;
896 goto done;
897 }
898
899 /*
900 * Does the request include data?
901 */
902 if (cmd->data) {
903 wbsd_prepare_data(host, cmd->data);
904
905 if (cmd->data->error != MMC_ERR_NONE)
906 goto done;
907 }
908
909 wbsd_send_command(host, cmd);
910
911 /*
912 * If this is a data transfer the request
913 * will be finished after the data has
914 * been transferred.
915 */
916 if (cmd->data && (cmd->error == MMC_ERR_NONE)) {
917 /*
918 * Dirty fix for hardware bug.
919 */
920 if (host->dma == -1)
921 tasklet_schedule(&host->fifo_tasklet);
922
923 spin_unlock_bh(&host->lock);
924
925 return;
926 }
927
928 done:
929 wbsd_request_end(host, mrq);
930
931 spin_unlock_bh(&host->lock);
932 }
933
934 static void wbsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
935 {
936 struct wbsd_host *host = mmc_priv(mmc);
937 u8 clk, setup, pwr;
938
939 DBGF("clock %uHz busmode %u powermode %u cs %u Vdd %u width %u\n",
940 ios->clock, ios->bus_mode, ios->power_mode, ios->chip_select,
941 ios->vdd, ios->bus_width);
942
943 spin_lock_bh(&host->lock);
944
945 /*
946 * Reset the chip on each power off.
947 * Should clear out any weird states.
948 */
949 if (ios->power_mode == MMC_POWER_OFF)
950 wbsd_init_device(host);
951
952 if (ios->clock >= 24000000)
953 clk = WBSD_CLK_24M;
954 else if (ios->clock >= 16000000)
955 clk = WBSD_CLK_16M;
956 else if (ios->clock >= 12000000)
957 clk = WBSD_CLK_12M;
958 else
959 clk = WBSD_CLK_375K;
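/*
 * Worked example, not part of the original driver: a requested clock of
 * 20 MHz would select WBSD_CLK_16M here, and anything below 12 MHz
 * falls back to the 375 kHz setting.
 */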
960
961 /*
962 * Only write to the clock register when
963 * there is an actual change.
964 */
965 if (clk != host->clk) {
966 wbsd_write_index(host, WBSD_IDX_CLK, clk);
967 host->clk = clk;
968 }
969
970 /*
971 * Power up card.
972 */
973 if (ios->power_mode != MMC_POWER_OFF) {
974 pwr = inb(host->base + WBSD_CSR);
975 pwr &= ~WBSD_POWER_N;
976 outb(pwr, host->base + WBSD_CSR);
977 }
978
979 /*
980 * MMC cards need to have pin 1 high during init.
981 * It wreaks havoc with the card detection though so
982 * that needs to be disabled.
983 */
984 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
985 if (ios->chip_select == MMC_CS_HIGH) {
986 BUG_ON(ios->bus_width != MMC_BUS_WIDTH_1);
987 setup |= WBSD_DAT3_H;
988 host->flags |= WBSD_FIGNORE_DETECT;
989 } else {
990 if (setup & WBSD_DAT3_H) {
991 setup &= ~WBSD_DAT3_H;
992
993 /*
994 * We cannot resume card detection immediately
995 * because of capacitance and delays in the chip.
996 */
997 mod_timer(&host->ignore_timer, jiffies + HZ / 100);
998 }
999 }
1000 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
1001
1002 /*
1003 * Store bus width for later. Will be used when
1004 * setting up the data transfer.
1005 */
1006 host->bus_width = ios->bus_width;
1007
1008 spin_unlock_bh(&host->lock);
1009 }
1010
1011 static int wbsd_get_ro(struct mmc_host *mmc)
1012 {
1013 struct wbsd_host *host = mmc_priv(mmc);
1014 u8 csr;
1015
1016 spin_lock_bh(&host->lock);
1017
1018 csr = inb(host->base + WBSD_CSR);
1019 csr |= WBSD_MSLED;
1020 outb(csr, host->base + WBSD_CSR);
1021
1022 mdelay(1);
1023
1024 csr = inb(host->base + WBSD_CSR);
1025 csr &= ~WBSD_MSLED;
1026 outb(csr, host->base + WBSD_CSR);
1027
1028 spin_unlock_bh(&host->lock);
1029
1030 return csr & WBSD_WRPT;
1031 }
1032
1033 static struct mmc_host_ops wbsd_ops = {
1034 .request = wbsd_request,
1035 .set_ios = wbsd_set_ios,
1036 .get_ro = wbsd_get_ro,
1037 };
1038
1039 /*****************************************************************************\
1040 * *
1041 * Interrupt handling *
1042 * *
1043 \*****************************************************************************/
1044
1045 /*
1046 * Helper function to reset detection ignore
1047 */
1048
1049 static void wbsd_reset_ignore(unsigned long data)
1050 {
1051 struct wbsd_host *host = (struct wbsd_host *)data;
1052
1053 BUG_ON(host == NULL);
1054
1055 DBG("Resetting card detection ignore\n");
1056
1057 spin_lock_bh(&host->lock);
1058
1059 host->flags &= ~WBSD_FIGNORE_DETECT;
1060
1061 /*
1062 * Card status might have changed during the
1063 * blackout.
1064 */
1065 tasklet_schedule(&host->card_tasklet);
1066
1067 spin_unlock_bh(&host->lock);
1068 }
1069
1070 /*
1071 * Tasklets
1072 */
1073
1074 static inline struct mmc_data *wbsd_get_data(struct wbsd_host *host)
1075 {
1076 WARN_ON(!host->mrq);
1077 if (!host->mrq)
1078 return NULL;
1079
1080 WARN_ON(!host->mrq->cmd);
1081 if (!host->mrq->cmd)
1082 return NULL;
1083
1084 WARN_ON(!host->mrq->cmd->data);
1085 if (!host->mrq->cmd->data)
1086 return NULL;
1087
1088 return host->mrq->cmd->data;
1089 }
1090
1091 static void wbsd_tasklet_card(unsigned long param)
1092 {
1093 struct wbsd_host *host = (struct wbsd_host *)param;
1094 u8 csr;
1095 int delay = -1;
1096
1097 spin_lock(&host->lock);
1098
1099 if (host->flags & WBSD_FIGNORE_DETECT) {
1100 spin_unlock(&host->lock);
1101 return;
1102 }
1103
1104 csr = inb(host->base + WBSD_CSR);
1105 WARN_ON(csr == 0xff);
1106
1107 if (csr & WBSD_CARDPRESENT) {
1108 if (!(host->flags & WBSD_FCARD_PRESENT)) {
1109 DBG("Card inserted\n");
1110 host->flags |= WBSD_FCARD_PRESENT;
1111
1112 delay = 500;
1113 }
1114 } else if (host->flags & WBSD_FCARD_PRESENT) {
1115 DBG("Card removed\n");
1116 host->flags &= ~WBSD_FCARD_PRESENT;
1117
1118 if (host->mrq) {
1119 printk(KERN_ERR "%s: Card removed during transfer!\n",
1120 mmc_hostname(host->mmc));
1121 wbsd_reset(host);
1122
1123 host->mrq->cmd->error = MMC_ERR_FAILED;
1124 tasklet_schedule(&host->finish_tasklet);
1125 }
1126
1127 delay = 0;
1128 }
1129
1130 /*
1131 * Unlock first since we might get a call back.
1132 */
1133
1134 spin_unlock(&host->lock);
1135
1136 if (delay != -1)
1137 mmc_detect_change(host->mmc, msecs_to_jiffies(delay));
1138 }
1139
1140 static void wbsd_tasklet_fifo(unsigned long param)
1141 {
1142 struct wbsd_host *host = (struct wbsd_host *)param;
1143 struct mmc_data *data;
1144
1145 spin_lock(&host->lock);
1146
1147 if (!host->mrq)
1148 goto end;
1149
1150 data = wbsd_get_data(host);
1151 if (!data)
1152 goto end;
1153
1154 if (data->flags & MMC_DATA_WRITE)
1155 wbsd_fill_fifo(host);
1156 else
1157 wbsd_empty_fifo(host);
1158
1159 /*
1160 * Done?
1161 */
1162 if (host->size == data->bytes_xfered) {
1163 wbsd_write_index(host, WBSD_IDX_FIFOEN, 0);
1164 tasklet_schedule(&host->finish_tasklet);
1165 }
1166
1167 end:
1168 spin_unlock(&host->lock);
1169 }
1170
1171 static void wbsd_tasklet_crc(unsigned long param)
1172 {
1173 struct wbsd_host *host = (struct wbsd_host *)param;
1174 struct mmc_data *data;
1175
1176 spin_lock(&host->lock);
1177
1178 if (!host->mrq)
1179 goto end;
1180
1181 data = wbsd_get_data(host);
1182 if (!data)
1183 goto end;
1184
1185 DBGF("CRC error\n");
1186
1187 data->error = MMC_ERR_BADCRC;
1188
1189 tasklet_schedule(&host->finish_tasklet);
1190
1191 end:
1192 spin_unlock(&host->lock);
1193 }
1194
1195 static void wbsd_tasklet_timeout(unsigned long param)
1196 {
1197 struct wbsd_host *host = (struct wbsd_host *)param;
1198 struct mmc_data *data;
1199
1200 spin_lock(&host->lock);
1201
1202 if (!host->mrq)
1203 goto end;
1204
1205 data = wbsd_get_data(host);
1206 if (!data)
1207 goto end;
1208
1209 DBGF("Timeout\n");
1210
1211 data->error = MMC_ERR_TIMEOUT;
1212
1213 tasklet_schedule(&host->finish_tasklet);
1214
1215 end:
1216 spin_unlock(&host->lock);
1217 }
1218
1219 static void wbsd_tasklet_finish(unsigned long param)
1220 {
1221 struct wbsd_host *host = (struct wbsd_host *)param;
1222 struct mmc_data *data;
1223
1224 spin_lock(&host->lock);
1225
1226 WARN_ON(!host->mrq);
1227 if (!host->mrq)
1228 goto end;
1229
1230 data = wbsd_get_data(host);
1231 if (!data)
1232 goto end;
1233
1234 wbsd_finish_data(host, data);
1235
1236 end:
1237 spin_unlock(&host->lock);
1238 }
1239
1240 static void wbsd_tasklet_block(unsigned long param)
1241 {
1242 struct wbsd_host *host = (struct wbsd_host *)param;
1243 struct mmc_data *data;
1244
1245 spin_lock(&host->lock);
1246
1247 if ((wbsd_read_index(host, WBSD_IDX_CRCSTATUS) & WBSD_CRC_MASK) !=
1248 WBSD_CRC_OK) {
1249 data = wbsd_get_data(host);
1250 if (!data)
1251 goto end;
1252
1253 DBGF("CRC error\n");
1254
1255 data->error = MMC_ERR_BADCRC;
1256
1257 tasklet_schedule(&host->finish_tasklet);
1258 }
1259
1260 end:
1261 spin_unlock(&host->lock);
1262 }
1263
1264 /*
1265 * Interrupt handling
1266 */
1267
1268 static irqreturn_t wbsd_irq(int irq, void *dev_id, struct pt_regs *regs)
1269 {
1270 struct wbsd_host *host = dev_id;
1271 int isr;
1272
1273 isr = inb(host->base + WBSD_ISR);
1274
1275 /*
1276 * Was it actually our hardware that caused the interrupt?
1277 */
1278 if (isr == 0xff || isr == 0x00)
1279 return IRQ_NONE;
1280
1281 host->isr |= isr;
1282
1283 /*
1284 * Schedule tasklets as needed.
1285 */
1286 if (isr & WBSD_INT_CARD)
1287 tasklet_schedule(&host->card_tasklet);
1288 if (isr & WBSD_INT_FIFO_THRE)
1289 tasklet_schedule(&host->fifo_tasklet);
1290 if (isr & WBSD_INT_CRC)
1291 tasklet_hi_schedule(&host->crc_tasklet);
1292 if (isr & WBSD_INT_TIMEOUT)
1293 tasklet_hi_schedule(&host->timeout_tasklet);
1294 if (isr & WBSD_INT_BUSYEND)
1295 tasklet_hi_schedule(&host->block_tasklet);
1296 if (isr & WBSD_INT_TC)
1297 tasklet_schedule(&host->finish_tasklet);
1298
1299 return IRQ_HANDLED;
1300 }
1301
1302 /*****************************************************************************\
1303 * *
1304 * Device initialisation and shutdown *
1305 * *
1306 \*****************************************************************************/
1307
1308 /*
1309 * Allocate/free MMC structure.
1310 */
1311
1312 static int __devinit wbsd_alloc_mmc(struct device *dev)
1313 {
1314 struct mmc_host *mmc;
1315 struct wbsd_host *host;
1316
1317 /*
1318 * Allocate MMC structure.
1319 */
1320 mmc = mmc_alloc_host(sizeof(struct wbsd_host), dev);
1321 if (!mmc)
1322 return -ENOMEM;
1323
1324 host = mmc_priv(mmc);
1325 host->mmc = mmc;
1326
1327 host->dma = -1;
1328
1329 /*
1330 * Set host parameters.
1331 */
1332 mmc->ops = &wbsd_ops;
1333 mmc->f_min = 375000;
1334 mmc->f_max = 24000000;
1335 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1336 mmc->caps = MMC_CAP_4_BIT_DATA;
1337
1338 spin_lock_init(&host->lock);
1339
1340 /*
1341 * Set up timers
1342 */
1343 init_timer(&host->ignore_timer);
1344 host->ignore_timer.data = (unsigned long)host;
1345 host->ignore_timer.function = wbsd_reset_ignore;
1346
1347 /*
1348 * Maximum number of segments. Worst case is one sector per segment
1349 * so this will be 64kB/512.
1350 */
1351 mmc->max_hw_segs = 128;
1352 mmc->max_phys_segs = 128;
1353
1354 /*
1355 * Maximum number of sectors in one transfer. Also limited by 64kB
1356 * buffer.
1357 */
1358 mmc->max_sectors = 128;
1359
1360 /*
1361 * Maximum segment size. Could be one segment with the maximum number
1362 * of segments.
1363 */
1364 mmc->max_seg_size = mmc->max_sectors * 512;
1365
1366 dev_set_drvdata(dev, mmc);
1367
1368 return 0;
1369 }
1370
1371 static void __devexit wbsd_free_mmc(struct device *dev)
1372 {
1373 struct mmc_host *mmc;
1374 struct wbsd_host *host;
1375
1376 mmc = dev_get_drvdata(dev);
1377 if (!mmc)
1378 return;
1379
1380 host = mmc_priv(mmc);
1381 BUG_ON(host == NULL);
1382
1383 del_timer_sync(&host->ignore_timer);
1384
1385 mmc_free_host(mmc);
1386
1387 dev_set_drvdata(dev, NULL);
1388 }
1389
1390 /*
1391 * Scan for known chip IDs
1392 */
1393
1394 static int __devinit wbsd_scan(struct wbsd_host *host)
1395 {
1396 int i, j, k;
1397 int id;
1398
1399 /*
1400 * Iterate through all ports, all codes to
1401 * find hardware that is in our known list.
1402 */
1403 for (i = 0; i < ARRAY_SIZE(config_ports); i++) {
1404 if (!request_region(config_ports[i], 2, DRIVER_NAME))
1405 continue;
1406
1407 for (j = 0; j < ARRAY_SIZE(unlock_codes); j++) {
1408 id = 0xFFFF;
1409
1410 host->config = config_ports[i];
1411 host->unlock_code = unlock_codes[j];
1412
1413 wbsd_unlock_config(host);
1414
1415 outb(WBSD_CONF_ID_HI, config_ports[i]);
1416 id = inb(config_ports[i] + 1) << 8;
1417
1418 outb(WBSD_CONF_ID_LO, config_ports[i]);
1419 id |= inb(config_ports[i] + 1);
1420
1421 wbsd_lock_config(host);
1422
1423 for (k = 0; k < ARRAY_SIZE(valid_ids); k++) {
1424 if (id == valid_ids[k]) {
1425 host->chip_id = id;
1426
1427 return 0;
1428 }
1429 }
1430
1431 if (id != 0xFFFF) {
1432 DBG("Unknown hardware (id %x) found at %x\n",
1433 id, config_ports[i]);
1434 }
1435 }
1436
1437 release_region(config_ports[i], 2);
1438 }
1439
1440 host->config = 0;
1441 host->unlock_code = 0;
1442
1443 return -ENODEV;
1444 }
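/*
 * Illustrative walk-through of one probe pass, assuming the default
 * hardware is present: unlock config space at 0x2E by writing the
 * unlock code (0x83 on the first pass, 0x87 on the second) twice,
 * select WBSD_CONF_ID_HI and then WBSD_CONF_ID_LO at 0x2E, read each
 * ID byte back from 0x2F, and accept the chip if the assembled ID
 * equals 0x7112.
 */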
1445
1446 /*
1447 * Allocate/free io port ranges
1448 */
1449
1450 static int __devinit wbsd_request_region(struct wbsd_host *host, int base)
1451 {
1452 if (base & 0x7)
1453 return -EINVAL;
1454
1455 if (!request_region(base, 8, DRIVER_NAME))
1456 return -EIO;
1457
1458 host->base = base;
1459
1460 return 0;
1461 }
1462
1463 static void __devexit wbsd_release_regions(struct wbsd_host *host)
1464 {
1465 if (host->base)
1466 release_region(host->base, 8);
1467
1468 host->base = 0;
1469
1470 if (host->config)
1471 release_region(host->config, 2);
1472
1473 host->config = 0;
1474 }
1475
1476 /*
1477 * Allocate/free DMA port and buffer
1478 */
1479
1480 static void __devinit wbsd_request_dma(struct wbsd_host *host, int dma)
1481 {
1482 if (dma < 0)
1483 return;
1484
1485 if (request_dma(dma, DRIVER_NAME))
1486 goto err;
1487
1488 /*
1489 * We need to allocate a special buffer in
1490 * order for ISA to be able to DMA to it.
1491 */
1492 host->dma_buffer = kmalloc(WBSD_DMA_SIZE,
1493 GFP_NOIO | GFP_DMA | __GFP_REPEAT | __GFP_NOWARN);
1494 if (!host->dma_buffer)
1495 goto free;
1496
1497 /*
1498 * Translate the address to a physical address.
1499 */
1500 host->dma_addr = dma_map_single(host->mmc->dev, host->dma_buffer,
1501 WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
1502
1503 /*
1504 * ISA DMA must be aligned on a 64k basis.
1505 */
1506 if ((host->dma_addr & 0xffff) != 0)
1507 goto kfree;
1508 /*
1509 * ISA cannot access memory above 16 MB.
1510 */
1511 else if (host->dma_addr >= 0x1000000)
1512 goto kfree;
1513
1514 host->dma = dma;
1515
1516 return;
1517
1518 kfree:
1519 /*
1520 * If we've gotten here then there is some kind of alignment bug
1521 */
1522 BUG_ON(1);
1523
1524 dma_unmap_single(host->mmc->dev, host->dma_addr,
1525 WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
1526 host->dma_addr = (dma_addr_t)NULL;
1527
1528 kfree(host->dma_buffer);
1529 host->dma_buffer = NULL;
1530
1531 free:
1532 free_dma(dma);
1533
1534 err:
1535 printk(KERN_WARNING DRIVER_NAME ": Unable to allocate DMA %d. "
1536 "Falling back on FIFO.\n", dma);
1537 }
1538
1539 static void __devexit wbsd_release_dma(struct wbsd_host *host)
1540 {
1541 if (host->dma_addr) {
1542 dma_unmap_single(host->mmc->dev, host->dma_addr,
1543 WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
1544 }
1545 kfree(host->dma_buffer);
1546 if (host->dma >= 0)
1547 free_dma(host->dma);
1548
1549 host->dma = -1;
1550 host->dma_buffer = NULL;
1551 host->dma_addr = (dma_addr_t)NULL;
1552 }
1553
1554 /*
1555 * Allocate/free IRQ.
1556 */
1557
1558 static int __devinit wbsd_request_irq(struct wbsd_host *host, int irq)
1559 {
1560 int ret;
1561
1562 /*
1563 * Allocate interrupt.
1564 */
1565
1566 ret = request_irq(irq, wbsd_irq, SA_SHIRQ, DRIVER_NAME, host);
1567 if (ret)
1568 return ret;
1569
1570 host->irq = irq;
1571
1572 /*
1573 * Set up tasklets.
1574 */
1575 tasklet_init(&host->card_tasklet, wbsd_tasklet_card,
1576 (unsigned long)host);
1577 tasklet_init(&host->fifo_tasklet, wbsd_tasklet_fifo,
1578 (unsigned long)host);
1579 tasklet_init(&host->crc_tasklet, wbsd_tasklet_crc,
1580 (unsigned long)host);
1581 tasklet_init(&host->timeout_tasklet, wbsd_tasklet_timeout,
1582 (unsigned long)host);
1583 tasklet_init(&host->finish_tasklet, wbsd_tasklet_finish,
1584 (unsigned long)host);
1585 tasklet_init(&host->block_tasklet, wbsd_tasklet_block,
1586 (unsigned long)host);
1587
1588 return 0;
1589 }
1590
1591 static void __devexit wbsd_release_irq(struct wbsd_host *host)
1592 {
1593 if (!host->irq)
1594 return;
1595
1596 free_irq(host->irq, host);
1597
1598 host->irq = 0;
1599
1600 tasklet_kill(&host->card_tasklet);
1601 tasklet_kill(&host->fifo_tasklet);
1602 tasklet_kill(&host->crc_tasklet);
1603 tasklet_kill(&host->timeout_tasklet);
1604 tasklet_kill(&host->finish_tasklet);
1605 tasklet_kill(&host->block_tasklet);
1606 }
1607
1608 /*
1609 * Allocate all resources for the host.
1610 */
1611
1612 static int __devinit wbsd_request_resources(struct wbsd_host *host,
1613 int base, int irq, int dma)
1614 {
1615 int ret;
1616
1617 /*
1618 * Allocate I/O ports.
1619 */
1620 ret = wbsd_request_region(host, base);
1621 if (ret)
1622 return ret;
1623
1624 /*
1625 * Allocate interrupt.
1626 */
1627 ret = wbsd_request_irq(host, irq);
1628 if (ret)
1629 return ret;
1630
1631 /*
1632 * Allocate DMA.
1633 */
1634 wbsd_request_dma(host, dma);
1635
1636 return 0;
1637 }
1638
1639 /*
1640 * Release all resources for the host.
1641 */
1642
1643 static void __devexit wbsd_release_resources(struct wbsd_host *host)
1644 {
1645 wbsd_release_dma(host);
1646 wbsd_release_irq(host);
1647 wbsd_release_regions(host);
1648 }
1649
1650 /*
1651 * Configure the resources the chip should use.
1652 */
1653
1654 static void wbsd_chip_config(struct wbsd_host *host)
1655 {
1656 wbsd_unlock_config(host);
1657
1658 /*
1659 * Reset the chip.
1660 */
1661 wbsd_write_config(host, WBSD_CONF_SWRST, 1);
1662 wbsd_write_config(host, WBSD_CONF_SWRST, 0);
1663
1664 /*
1665 * Select SD/MMC function.
1666 */
1667 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
1668
1669 /*
1670 * Set up card detection.
1671 */
1672 wbsd_write_config(host, WBSD_CONF_PINS, WBSD_PINS_DETECT_GP11);
1673
1674 /*
1675 * Configure chip
1676 */
1677 wbsd_write_config(host, WBSD_CONF_PORT_HI, host->base >> 8);
1678 wbsd_write_config(host, WBSD_CONF_PORT_LO, host->base & 0xff);
1679
1680 wbsd_write_config(host, WBSD_CONF_IRQ, host->irq);
1681
1682 if (host->dma >= 0)
1683 wbsd_write_config(host, WBSD_CONF_DRQ, host->dma);
1684
1685 /*
1686 * Enable and power up chip.
1687 */
1688 wbsd_write_config(host, WBSD_CONF_ENABLE, 1);
1689 wbsd_write_config(host, WBSD_CONF_POWER, 0x20);
1690
1691 wbsd_lock_config(host);
1692 }
1693
1694 /*
1695 * Check that configured resources are correct.
1696 */
1697
1698 static int wbsd_chip_validate(struct wbsd_host *host)
1699 {
1700 int base, irq, dma;
1701
1702 wbsd_unlock_config(host);
1703
1704 /*
1705 * Select SD/MMC function.
1706 */
1707 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
1708
1709 /*
1710 * Read configuration.
1711 */
1712 base = wbsd_read_config(host, WBSD_CONF_PORT_HI) << 8;
1713 base |= wbsd_read_config(host, WBSD_CONF_PORT_LO);
1714
1715 irq = wbsd_read_config(host, WBSD_CONF_IRQ);
1716
1717 dma = wbsd_read_config(host, WBSD_CONF_DRQ);
1718
1719 wbsd_lock_config(host);
1720
1721 /*
1722 * Validate against given configuration.
1723 */
1724 if (base != host->base)
1725 return 0;
1726 if (irq != host->irq)
1727 return 0;
1728 if ((dma != host->dma) && (host->dma != -1))
1729 return 0;
1730
1731 return 1;
1732 }
1733
1734 /*
1735 * Powers down the SD function
1736 */
1737
1738 static void wbsd_chip_poweroff(struct wbsd_host *host)
1739 {
1740 wbsd_unlock_config(host);
1741
1742 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
1743 wbsd_write_config(host, WBSD_CONF_ENABLE, 0);
1744
1745 wbsd_lock_config(host);
1746 }
1747
1748 /*****************************************************************************\
1749 * *
1750 * Devices setup and shutdown *
1751 * *
1752 \*****************************************************************************/
1753
1754 static int __devinit wbsd_init(struct device *dev, int base, int irq, int dma,
1755 int pnp)
1756 {
1757 struct wbsd_host *host = NULL;
1758 struct mmc_host *mmc = NULL;
1759 int ret;
1760
1761 ret = wbsd_alloc_mmc(dev);
1762 if (ret)
1763 return ret;
1764
1765 mmc = dev_get_drvdata(dev);
1766 host = mmc_priv(mmc);
1767
1768 /*
1769 * Scan for hardware.
1770 */
1771 ret = wbsd_scan(host);
1772 if (ret) {
1773 if (pnp && (ret == -ENODEV)) {
1774 printk(KERN_WARNING DRIVER_NAME
1775 ": Unable to confirm device presence. You may "
1776 "experience lock-ups.\n");
1777 } else {
1778 wbsd_free_mmc(dev);
1779 return ret;
1780 }
1781 }
1782
1783 /*
1784 * Request resources.
1785 */
1786 ret = wbsd_request_resources(host, base, irq, dma);
1787 if (ret) {
1788 wbsd_release_resources(host);
1789 wbsd_free_mmc(dev);
1790 return ret;
1791 }
1792
1793 /*
1794 * See if chip needs to be configured.
1795 */
1796 if (pnp) {
1797 if ((host->config != 0) && !wbsd_chip_validate(host)) {
1798 printk(KERN_WARNING DRIVER_NAME
1799 ": PnP active but chip not configured! "
1800 "You probably have a buggy BIOS. "
1801 "Configuring chip manually.\n");
1802 wbsd_chip_config(host);
1803 }
1804 } else
1805 wbsd_chip_config(host);
1806
1807 /*
1808 * Power Management stuff. No idea how this works.
1809 * Not tested.
1810 */
1811 #ifdef CONFIG_PM
1812 if (host->config) {
1813 wbsd_unlock_config(host);
1814 wbsd_write_config(host, WBSD_CONF_PME, 0xA0);
1815 wbsd_lock_config(host);
1816 }
1817 #endif
1818 /*
1819 * Allow device to initialise itself properly.
1820 */
1821 mdelay(5);
1822
1823 /*
1824 * Reset the chip into a known state.
1825 */
1826 wbsd_init_device(host);
1827
1828 mmc_add_host(mmc);
1829
1830 printk(KERN_INFO "%s: W83L51xD", mmc_hostname(mmc));
1831 if (host->chip_id != 0)
1832 printk(" id %x", (int)host->chip_id);
1833 printk(" at 0x%x irq %d", (int)host->base, (int)host->irq);
1834 if (host->dma >= 0)
1835 printk(" dma %d", (int)host->dma);
1836 else
1837 printk(" FIFO");
1838 if (pnp)
1839 printk(" PnP");
1840 printk("\n");
1841
1842 return 0;
1843 }
1844
1845 static void __devexit wbsd_shutdown(struct device *dev, int pnp)
1846 {
1847 struct mmc_host *mmc = dev_get_drvdata(dev);
1848 struct wbsd_host *host;
1849
1850 if (!mmc)
1851 return;
1852
1853 host = mmc_priv(mmc);
1854
1855 mmc_remove_host(mmc);
1856
1857 /*
1858 * Power down the SD/MMC function.
1859 */
1860 if (!pnp)
1861 wbsd_chip_poweroff(host);
1862
1863 wbsd_release_resources(host);
1864
1865 wbsd_free_mmc(dev);
1866 }
1867
1868 /*
1869 * Non-PnP
1870 */
1871
1872 static int __devinit wbsd_probe(struct platform_device *dev)
1873 {
1874 return wbsd_init(&dev->dev, io, irq, dma, 0);
1875 }
1876
1877 static int __devexit wbsd_remove(struct platform_device *dev)
1878 {
1879 wbsd_shutdown(&dev->dev, 0);
1880
1881 return 0;
1882 }
1883
1884 /*
1885 * PnP
1886 */
1887
1888 #ifdef CONFIG_PNP
1889
1890 static int __devinit
1891 wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id)
1892 {
1893 int io, irq, dma;
1894
1895 /*
1896 * Get resources from PnP layer.
1897 */
1898 io = pnp_port_start(pnpdev, 0);
1899 irq = pnp_irq(pnpdev, 0);
1900 if (pnp_dma_valid(pnpdev, 0))
1901 dma = pnp_dma(pnpdev, 0);
1902 else
1903 dma = -1;
1904
1905 DBGF("PnP resources: port %3x irq %d dma %d\n", io, irq, dma);
1906
1907 return wbsd_init(&pnpdev->dev, io, irq, dma, 1);
1908 }
1909
1910 static void __devexit wbsd_pnp_remove(struct pnp_dev *dev)
1911 {
1912 wbsd_shutdown(&dev->dev, 1);
1913 }
1914
1915 #endif /* CONFIG_PNP */
1916
1917 /*
1918 * Power management
1919 */
1920
1921 #ifdef CONFIG_PM
1922
1923 static int wbsd_suspend(struct wbsd_host *host, pm_message_t state)
1924 {
1925 BUG_ON(host == NULL);
1926
1927 return mmc_suspend_host(host->mmc, state);
1928 }
1929
1930 static int wbsd_resume(struct wbsd_host *host)
1931 {
1932 BUG_ON(host == NULL);
1933
1934 wbsd_init_device(host);
1935
1936 return mmc_resume_host(host->mmc);
1937 }
1938
1939 static int wbsd_platform_suspend(struct platform_device *dev,
1940 pm_message_t state)
1941 {
1942 struct mmc_host *mmc = platform_get_drvdata(dev);
1943 struct wbsd_host *host;
1944 int ret;
1945
1946 if (mmc == NULL)
1947 return 0;
1948
1949 DBGF("Suspending...\n");
1950
1951 host = mmc_priv(mmc);
1952
1953 ret = wbsd_suspend(host, state);
1954 if (ret)
1955 return ret;
1956
1957 wbsd_chip_poweroff(host);
1958
1959 return 0;
1960 }
1961
1962 static int wbsd_platform_resume(struct platform_device *dev)
1963 {
1964 struct mmc_host *mmc = platform_get_drvdata(dev);
1965 struct wbsd_host *host;
1966
1967 if (mmc == NULL)
1968 return 0;
1969
1970 DBGF("Resuming...\n");
1971
1972 host = mmc_priv(mmc);
1973
1974 wbsd_chip_config(host);
1975
1976 /*
1977 * Allow device to initialise itself properly.
1978 */
1979 mdelay(5);
1980
1981 return wbsd_resume(host);
1982 }
1983
1984 #ifdef CONFIG_PNP
1985
1986 static int wbsd_pnp_suspend(struct pnp_dev *pnp_dev, pm_message_t state)
1987 {
1988 struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev);
1989 struct wbsd_host *host;
1990
1991 if (mmc == NULL)
1992 return 0;
1993
1994 DBGF("Suspending...\n");
1995
1996 host = mmc_priv(mmc);
1997
1998 return wbsd_suspend(host, state);
1999 }
2000
2001 static int wbsd_pnp_resume(struct pnp_dev *pnp_dev)
2002 {
2003 struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev);
2004 struct wbsd_host *host;
2005
2006 if (mmc == NULL)
2007 return 0;
2008
2009 DBGF("Resuming...\n");
2010
2011 host = mmc_priv(mmc);
2012
2013 /*
2014 * See if chip needs to be configured.
2015 */
2016 if (host->config != 0) {
2017 if (!wbsd_chip_validate(host)) {
2018 printk(KERN_WARNING DRIVER_NAME
2019 ": PnP active but chip not configured! "
2020 "You probably have a buggy BIOS. "
2021 "Configuring chip manually.\n");
2022 wbsd_chip_config(host);
2023 }
2024 }
2025
2026 /*
2027 * Allow device to initialise itself properly.
2028 */
2029 mdelay(5);
2030
2031 return wbsd_resume(host);
2032 }
2033
2034 #endif /* CONFIG_PNP */
2035
2036 #else /* CONFIG_PM */
2037
2038 #define wbsd_platform_suspend NULL
2039 #define wbsd_platform_resume NULL
2040
2041 #define wbsd_pnp_suspend NULL
2042 #define wbsd_pnp_resume NULL
2043
2044 #endif /* CONFIG_PM */
2045
2046 static struct platform_device *wbsd_device;
2047
2048 static struct platform_driver wbsd_driver = {
2049 .probe = wbsd_probe,
2050 .remove = __devexit_p(wbsd_remove),
2051
2052 .suspend = wbsd_platform_suspend,
2053 .resume = wbsd_platform_resume,
2054 .driver = {
2055 .name = DRIVER_NAME,
2056 },
2057 };
2058
2059 #ifdef CONFIG_PNP
2060
2061 static struct pnp_driver wbsd_pnp_driver = {
2062 .name = DRIVER_NAME,
2063 .id_table = pnp_dev_table,
2064 .probe = wbsd_pnp_probe,
2065 .remove = __devexit_p(wbsd_pnp_remove),
2066
2067 .suspend = wbsd_pnp_suspend,
2068 .resume = wbsd_pnp_resume,
2069 };
2070
2071 #endif /* CONFIG_PNP */
2072
2073 /*
2074 * Module loading/unloading
2075 */
2076
2077 static int __init wbsd_drv_init(void)
2078 {
2079 int result;
2080
2081 printk(KERN_INFO DRIVER_NAME
2082 ": Winbond W83L51xD SD/MMC card interface driver, "
2083 DRIVER_VERSION "\n");
2084 printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
2085
2086 #ifdef CONFIG_PNP
2087
2088 if (!nopnp) {
2089 result = pnp_register_driver(&wbsd_pnp_driver);
2090 if (result < 0)
2091 return result;
2092 }
2093 #endif /* CONFIG_PNP */
2094
2095 if (nopnp) {
2096 result = platform_driver_register(&wbsd_driver);
2097 if (result < 0)
2098 return result;
2099
2100 wbsd_device = platform_device_alloc(DRIVER_NAME, -1);
2101 if (!wbsd_device) {
2102 platform_driver_unregister(&wbsd_driver);
2103 return -ENOMEM;
2104 }
2105
2106 result = platform_device_add(wbsd_device);
2107 if (result) {
2108 platform_device_put(wbsd_device);
2109 platform_driver_unregister(&wbsd_driver);
2110 return result;
2111 }
2112 }
2113
2114 return 0;
2115 }
2116
2117 static void __exit wbsd_drv_exit(void)
2118 {
2119 #ifdef CONFIG_PNP
2120
2121 if (!nopnp)
2122 pnp_unregister_driver(&wbsd_pnp_driver);
2123
2124 #endif /* CONFIG_PNP */
2125
2126 if (nopnp) {
2127 platform_device_unregister(wbsd_device);
2128
2129 platform_driver_unregister(&wbsd_driver);
2130 }
2131
2132 DBG("unloaded\n");
2133 }
2134
2135 module_init(wbsd_drv_init);
2136 module_exit(wbsd_drv_exit);
2137 #ifdef CONFIG_PNP
2138 module_param(nopnp, uint, 0444);
2139 #endif
2140 module_param(io, uint, 0444);
2141 module_param(irq, uint, 0444);
2142 module_param(dma, int, 0444);
2143
2144 MODULE_LICENSE("GPL");
2145 MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>");
2146 MODULE_DESCRIPTION("Winbond W83L51xD SD/MMC card interface driver");
2147 MODULE_VERSION(DRIVER_VERSION);
2148
2149 #ifdef CONFIG_PNP
2150 MODULE_PARM_DESC(nopnp, "Scan for device instead of relying on PNP. (default 0)");
2151 #endif
2152 MODULE_PARM_DESC(io, "I/O base to allocate. Must be 8 byte aligned. (default 0x248)");
2153 MODULE_PARM_DESC(irq, "IRQ to allocate. (default 6)");
2154 MODULE_PARM_DESC(dma, "DMA channel to allocate. -1 for no DMA. (default 2)");
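/*
 * Illustrative usage, not part of the original source: when the
 * defaults above do not suit the system, the non-PnP path can be forced
 * and resources overridden at load time, e.g.
 * "modprobe wbsd nopnp=1 io=0x248 irq=6 dma=2", or DMA disabled
 * entirely with "dma=-1" to fall back on FIFO transfers.
 */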