drivers/mmc/wbsd.c
1 /*
2 * linux/drivers/mmc/wbsd.c - Winbond W83L51xD SD/MMC driver
3 *
4 * Copyright (C) 2004-2005 Pierre Ossman, All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 *
11 * Warning!
12 *
13 * Changes to the FIFO system should be done with extreme care since
14 * the hardware is full of bugs related to the FIFO. Known issues are:
15 *
16 * - FIFO size field in FSR is always zero.
17 *
18 * - FIFO interrupts tend not to work as they should. Interrupts are
19 * triggered only for full/empty events, not for threshold values.
20 *
21 * - On APIC systems the FIFO empty interrupt is sometimes lost.
22 */
23
24 #include <linux/module.h>
25 #include <linux/moduleparam.h>
26 #include <linux/init.h>
27 #include <linux/ioport.h>
28 #include <linux/platform_device.h>
29 #include <linux/interrupt.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/delay.h>
32 #include <linux/pnp.h>
33 #include <linux/highmem.h>
34 #include <linux/mmc/host.h>
35 #include <linux/mmc/protocol.h>
36
37 #include <asm/io.h>
38 #include <asm/dma.h>
39 #include <asm/scatterlist.h>
40
41 #include "wbsd.h"
42
43 #define DRIVER_NAME "wbsd"
44 #define DRIVER_VERSION "1.5"
45
46 #define DBG(x...) \
47 pr_debug(DRIVER_NAME ": " x)
48 #define DBGF(f, x...) \
49 pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
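/*
 * Note: pr_debug() normally compiles to nothing unless this file is
 * built with DEBUG defined, so these macros are silent by default.
 */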
50
51 /*
52 * Device resources
53 */
54
55 #ifdef CONFIG_PNP
56
57 static const struct pnp_device_id pnp_dev_table[] = {
58 { "WEC0517", 0 },
59 { "WEC0518", 0 },
60 { "", 0 },
61 };
62
63 MODULE_DEVICE_TABLE(pnp, pnp_dev_table);
64
65 #endif /* CONFIG_PNP */
66
67 static const int config_ports[] = { 0x2E, 0x4E };
68 static const int unlock_codes[] = { 0x83, 0x87 };
69
70 static const int valid_ids[] = {
71 0x7112,
72 };
73
74 #ifdef CONFIG_PNP
75 static unsigned int nopnp = 0;
76 #else
77 static const unsigned int nopnp = 1;
78 #endif
79 static unsigned int io = 0x248;
80 static unsigned int irq = 6;
81 static int dma = 2;
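/*
 * Example (illustrative, not from the original source): loading the
 * driver without PnP, using the defaults above:
 *
 *   modprobe wbsd nopnp=1 io=0x248 irq=6 dma=2
 *
 * Passing dma=-1 disables DMA and falls back to FIFO (PIO) transfers.
 */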
82
83 /*
84 * Basic functions
85 */
86
87 static inline void wbsd_unlock_config(struct wbsd_host *host)
88 {
89 BUG_ON(host->config == 0);
90
91 outb(host->unlock_code, host->config);
92 outb(host->unlock_code, host->config);
93 }
94
95 static inline void wbsd_lock_config(struct wbsd_host *host)
96 {
97 BUG_ON(host->config == 0);
98
99 outb(LOCK_CODE, host->config);
100 }
101
102 static inline void wbsd_write_config(struct wbsd_host *host, u8 reg, u8 value)
103 {
104 BUG_ON(host->config == 0);
105
106 outb(reg, host->config);
107 outb(value, host->config + 1);
108 }
109
110 static inline u8 wbsd_read_config(struct wbsd_host *host, u8 reg)
111 {
112 BUG_ON(host->config == 0);
113
114 outb(reg, host->config);
115 return inb(host->config + 1);
116 }
117
118 static inline void wbsd_write_index(struct wbsd_host *host, u8 index, u8 value)
119 {
120 outb(index, host->base + WBSD_IDXR);
121 outb(value, host->base + WBSD_DATAR);
122 }
123
124 static inline u8 wbsd_read_index(struct wbsd_host *host, u8 index)
125 {
126 outb(index, host->base + WBSD_IDXR);
127 return inb(host->base + WBSD_DATAR);
128 }
129
130 /*
131 * Common routines
132 */
133
134 static void wbsd_init_device(struct wbsd_host *host)
135 {
136 u8 setup, ier;
137
138 /*
139 * Reset chip (SD/MMC part) and fifo.
140 */
141 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
142 setup |= WBSD_FIFO_RESET | WBSD_SOFT_RESET;
143 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
144
145 /*
146 * Set DAT3 to input
147 */
148 setup &= ~WBSD_DAT3_H;
149 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
150 host->flags &= ~WBSD_FIGNORE_DETECT;
151
152 /*
153 * Read back default clock.
154 */
155 host->clk = wbsd_read_index(host, WBSD_IDX_CLK);
156
157 /*
158 * Power down port.
159 */
160 outb(WBSD_POWER_N, host->base + WBSD_CSR);
161
162 /*
163 * Set maximum timeout.
164 */
165 wbsd_write_index(host, WBSD_IDX_TAAC, 0x7F);
166
167 /*
168 * Test for card presence
169 */
170 if (inb(host->base + WBSD_CSR) & WBSD_CARDPRESENT)
171 host->flags |= WBSD_FCARD_PRESENT;
172 else
173 host->flags &= ~WBSD_FCARD_PRESENT;
174
175 /*
176 * Enable interesting interrupts.
177 */
178 ier = 0;
179 ier |= WBSD_EINT_CARD;
180 ier |= WBSD_EINT_FIFO_THRE;
181 ier |= WBSD_EINT_CCRC;
182 ier |= WBSD_EINT_TIMEOUT;
183 ier |= WBSD_EINT_CRC;
184 ier |= WBSD_EINT_TC;
185
186 outb(ier, host->base + WBSD_EIR);
187
188 /*
189 * Clear interrupts.
190 */
191 inb(host->base + WBSD_ISR);
192 }
193
194 static void wbsd_reset(struct wbsd_host *host)
195 {
196 u8 setup;
197
198 printk(KERN_ERR "%s: Resetting chip\n", mmc_hostname(host->mmc));
199
200 /*
201 * Soft reset of chip (SD/MMC part).
202 */
203 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
204 setup |= WBSD_SOFT_RESET;
205 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
206 }
207
208 static void wbsd_request_end(struct wbsd_host *host, struct mmc_request *mrq)
209 {
210 unsigned long dmaflags;
211
212 DBGF("Ending request, cmd (%x)\n", mrq->cmd->opcode);
213
214 if (host->dma >= 0) {
215 /*
216 * Release ISA DMA controller.
217 */
218 dmaflags = claim_dma_lock();
219 disable_dma(host->dma);
220 clear_dma_ff(host->dma);
221 release_dma_lock(dmaflags);
222
223 /*
224 * Disable DMA on host.
225 */
226 wbsd_write_index(host, WBSD_IDX_DMA, 0);
227 }
228
229 host->mrq = NULL;
230
231 /*
232 * MMC layer might call back into the driver so first unlock.
233 */
234 spin_unlock(&host->lock);
235 mmc_request_done(host->mmc, mrq);
236 spin_lock(&host->lock);
237 }
238
239 /*
240 * Scatter/gather functions
241 */
242
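/*
 * PIO transfers walk the scatter/gather list by hand: cur_sg and
 * num_sg track the current entry, while offset and remain track
 * progress within it.
 */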
243 static inline void wbsd_init_sg(struct wbsd_host *host, struct mmc_data *data)
244 {
245 /*
246 * Get info about the SG list from the data structure.
247 */
248 host->cur_sg = data->sg;
249 host->num_sg = data->sg_len;
250
251 host->offset = 0;
252 host->remain = host->cur_sg->length;
253 }
254
255 static inline int wbsd_next_sg(struct wbsd_host *host)
256 {
257 /*
258 * Skip to next SG entry.
259 */
260 host->cur_sg++;
261 host->num_sg--;
262
263 /*
264 * Any entries left?
265 */
266 if (host->num_sg > 0) {
267 host->offset = 0;
268 host->remain = host->cur_sg->length;
269 }
270
271 return host->num_sg;
272 }
273
274 static inline char *wbsd_kmap_sg(struct wbsd_host *host)
275 {
276 host->mapped_sg = kmap_atomic(host->cur_sg->page, KM_BIO_SRC_IRQ) +
277 host->cur_sg->offset;
278 return host->mapped_sg;
279 }
280
281 static inline void wbsd_kunmap_sg(struct wbsd_host *host)
282 {
283 kunmap_atomic(host->mapped_sg, KM_BIO_SRC_IRQ);
284 }
285
286 static inline void wbsd_sg_to_dma(struct wbsd_host *host, struct mmc_data *data)
287 {
288 unsigned int len, i, size;
289 struct scatterlist *sg;
290 char *dmabuf = host->dma_buffer;
291 char *sgbuf;
292
293 size = host->size;
294
295 sg = data->sg;
296 len = data->sg_len;
297
298 /*
299 * Just loop through all entries. The size might not
300 * cover the entire list, so make sure that
301 * we do not transfer too much.
302 */
303 for (i = 0; i < len; i++) {
304 sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset;
305 if (size < sg[i].length)
306 memcpy(dmabuf, sgbuf, size);
307 else
308 memcpy(dmabuf, sgbuf, sg[i].length);
309 kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ);
310 dmabuf += sg[i].length;
311
312 if (size < sg[i].length)
313 size = 0;
314 else
315 size -= sg[i].length;
316
317 if (size == 0)
318 break;
319 }
320
321 /*
322 * Check that we didn't get a request to transfer
323 * more data than can fit into the SG list.
324 */
325
326 BUG_ON(size != 0);
327
328 host->size -= size;
329 }
330
331 static inline void wbsd_dma_to_sg(struct wbsd_host *host, struct mmc_data *data)
332 {
333 unsigned int len, i, size;
334 struct scatterlist *sg;
335 char *dmabuf = host->dma_buffer;
336 char *sgbuf;
337
338 size = host->size;
339
340 sg = data->sg;
341 len = data->sg_len;
342
343 /*
344 * Just loop through all entries. The size might not
345 * cover the entire list, so make sure that
346 * we do not transfer too much.
347 */
348 for (i = 0; i < len; i++) {
349 sgbuf = kmap_atomic(sg[i].page, KM_BIO_SRC_IRQ) + sg[i].offset;
350 if (size < sg[i].length)
351 memcpy(sgbuf, dmabuf, size);
352 else
353 memcpy(sgbuf, dmabuf, sg[i].length);
354 kunmap_atomic(sgbuf, KM_BIO_SRC_IRQ);
355 dmabuf += sg[i].length;
356
357 if (size < sg[i].length)
358 size = 0;
359 else
360 size -= sg[i].length;
361
362 if (size == 0)
363 break;
364 }
365
366 /*
367 * Check that we didn't get a request to transfer
368 * more data than can fit into the SG list.
369 */
370
371 BUG_ON(size != 0);
372
373 host->size -= size;
374 }
375
376 /*
377 * Command handling
378 */
379
380 static inline void wbsd_get_short_reply(struct wbsd_host *host,
381 struct mmc_command *cmd)
382 {
383 /*
384 * Correct response type?
385 */
386 if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_SHORT) {
387 cmd->error = MMC_ERR_INVALID;
388 return;
389 }
390
391 cmd->resp[0] = wbsd_read_index(host, WBSD_IDX_RESP12) << 24;
392 cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP13) << 16;
393 cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP14) << 8;
394 cmd->resp[0] |= wbsd_read_index(host, WBSD_IDX_RESP15) << 0;
395 cmd->resp[1] = wbsd_read_index(host, WBSD_IDX_RESP16) << 24;
396 }
397
398 static inline void wbsd_get_long_reply(struct wbsd_host *host,
399 struct mmc_command *cmd)
400 {
401 int i;
402
403 /*
404 * Correct response type?
405 */
406 if (wbsd_read_index(host, WBSD_IDX_RSPLEN) != WBSD_RSP_LONG) {
407 cmd->error = MMC_ERR_INVALID;
408 return;
409 }
410
411 for (i = 0; i < 4; i++) {
412 cmd->resp[i] =
413 wbsd_read_index(host, WBSD_IDX_RESP1 + i * 4) << 24;
414 cmd->resp[i] |=
415 wbsd_read_index(host, WBSD_IDX_RESP2 + i * 4) << 16;
416 cmd->resp[i] |=
417 wbsd_read_index(host, WBSD_IDX_RESP3 + i * 4) << 8;
418 cmd->resp[i] |=
419 wbsd_read_index(host, WBSD_IDX_RESP4 + i * 4) << 0;
420 }
421 }
422
423 static void wbsd_send_command(struct wbsd_host *host, struct mmc_command *cmd)
424 {
425 int i;
426 u8 status, isr;
427
428 DBGF("Sending cmd (%x)\n", cmd->opcode);
429
430 /*
431 * Clear accumulated ISR. The interrupt routine
432 * will fill this one with events that occur during
433 * transfer.
434 */
435 host->isr = 0;
436
437 /*
438 * Send the command (CRC calculated by host).
439 */
440 outb(cmd->opcode, host->base + WBSD_CMDR);
441 for (i = 3; i >= 0; i--)
442 outb((cmd->arg >> (i * 8)) & 0xff, host->base + WBSD_CMDR);
443
444 cmd->error = MMC_ERR_NONE;
445
446 /*
447 * Wait for the request to complete.
448 */
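/*
 * Command completion is detected by polling the card traffic status
 * bit; any interrupt bits latched meanwhile are accumulated in
 * host->isr and examined below.
 */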
449 do {
450 status = wbsd_read_index(host, WBSD_IDX_STATUS);
451 } while (status & WBSD_CARDTRAFFIC);
452
453 /*
454 * Do we expect a reply?
455 */
456 if (cmd->flags & MMC_RSP_PRESENT) {
457 /*
458 * Read back status.
459 */
460 isr = host->isr;
461
462 /* Card removed? */
463 if (isr & WBSD_INT_CARD)
464 cmd->error = MMC_ERR_TIMEOUT;
465 /* Timeout? */
466 else if (isr & WBSD_INT_TIMEOUT)
467 cmd->error = MMC_ERR_TIMEOUT;
468 /* CRC? */
469 else if ((cmd->flags & MMC_RSP_CRC) && (isr & WBSD_INT_CRC))
470 cmd->error = MMC_ERR_BADCRC;
471 /* All ok */
472 else {
473 if (cmd->flags & MMC_RSP_136)
474 wbsd_get_long_reply(host, cmd);
475 else
476 wbsd_get_short_reply(host, cmd);
477 }
478 }
479
480 DBGF("Sent cmd (%x), res %d\n", cmd->opcode, cmd->error);
481 }
482
483 /*
484 * Data functions
485 */
486
487 static void wbsd_empty_fifo(struct wbsd_host *host)
488 {
489 struct mmc_data *data = host->mrq->cmd->data;
490 char *buffer;
491 int i, fsr, fifo;
492
493 /*
494 * Handle excessive data.
495 */
496 if (data->bytes_xfered == host->size)
497 return;
498
499 buffer = wbsd_kmap_sg(host) + host->offset;
500
501 /*
502 * Drain the fifo. This has a tendency to loop longer
503 * than the FIFO length (usually one block).
504 */
505 while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_EMPTY)) {
506 /*
507 * The size field in the FSR is broken so we have to
508 * do some guessing.
509 */
510 if (fsr & WBSD_FIFO_FULL)
511 fifo = 16;
512 else if (fsr & WBSD_FIFO_FUTHRE)
513 fifo = 8;
514 else
515 fifo = 1;
516
517 for (i = 0; i < fifo; i++) {
518 *buffer = inb(host->base + WBSD_DFR);
519 buffer++;
520 host->offset++;
521 host->remain--;
522
523 data->bytes_xfered++;
524
525 /*
526 * Transfer done?
527 */
528 if (data->bytes_xfered == host->size) {
529 wbsd_kunmap_sg(host);
530 return;
531 }
532
533 /*
534 * End of scatter list entry?
535 */
536 if (host->remain == 0) {
537 wbsd_kunmap_sg(host);
538
539 /*
540 * Get next entry. Check if last.
541 */
542 if (!wbsd_next_sg(host)) {
543 /*
544 * We should never reach this point.
545 * It means that we're trying to
546 * transfer more blocks than can fit
547 * into the scatter list.
548 */
549 BUG_ON(1);
550
551 host->size = data->bytes_xfered;
552
553 return;
554 }
555
556 buffer = wbsd_kmap_sg(host);
557 }
558 }
559 }
560
561 wbsd_kunmap_sg(host);
562
563 /*
564 * This is a very dirty hack to solve a
565 * hardware problem. The chip doesn't trigger
566 * FIFO threshold interrupts properly.
567 */
568 if ((host->size - data->bytes_xfered) < 16)
569 tasklet_schedule(&host->fifo_tasklet);
570 }
571
572 static void wbsd_fill_fifo(struct wbsd_host *host)
573 {
574 struct mmc_data *data = host->mrq->cmd->data;
575 char *buffer;
576 int i, fsr, fifo;
577
578 /*
579 * Check that we aren't being called after the
580 * entire buffer has been transferred.
581 */
582 if (data->bytes_xfered == host->size)
583 return;
584
585 buffer = wbsd_kmap_sg(host) + host->offset;
586
587 /*
588 * Fill the fifo. This has a tendency to loop longer
589 * than the FIFO length (usually one block).
590 */
591 while (!((fsr = inb(host->base + WBSD_FSR)) & WBSD_FIFO_FULL)) {
592 /*
593 * The size field in the FSR is broken so we have to
594 * do some guessing.
595 */
596 if (fsr & WBSD_FIFO_EMPTY)
597 fifo = 0;
598 else if (fsr & WBSD_FIFO_EMTHRE)
599 fifo = 8;
600 else
601 fifo = 15;
602
603 for (i = 16; i > fifo; i--) {
604 outb(*buffer, host->base + WBSD_DFR);
605 buffer++;
606 host->offset++;
607 host->remain--;
608
609 data->bytes_xfered++;
610
611 /*
612 * Transfer done?
613 */
614 if (data->bytes_xfered == host->size) {
615 wbsd_kunmap_sg(host);
616 return;
617 }
618
619 /*
620 * End of scatter list entry?
621 */
622 if (host->remain == 0) {
623 wbsd_kunmap_sg(host);
624
625 /*
626 * Get next entry. Check if last.
627 */
628 if (!wbsd_next_sg(host)) {
629 /*
630 * We should never reach this point.
631 * It means that we're trying to
632 * transfer more blocks than can fit
633 * into the scatter list.
634 */
635 BUG_ON(1);
636
637 host->size = data->bytes_xfered;
638
639 return;
640 }
641
642 buffer = wbsd_kmap_sg(host);
643 }
644 }
645 }
646
647 wbsd_kunmap_sg(host);
648
649 /*
650 * The controller stops sending interrupts for
651 * 'FIFO empty' under certain conditions. So we
652 * need to be a bit more pro-active.
653 */
654 tasklet_schedule(&host->fifo_tasklet);
655 }
656
657 static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data)
658 {
659 u16 blksize;
660 u8 setup;
661 unsigned long dmaflags;
662
663 DBGF("blksz %04x blks %04x flags %08x\n",
664 data->blksz, data->blocks, data->flags);
665 DBGF("tsac %d ms nsac %d clk\n",
666 data->timeout_ns / 1000000, data->timeout_clks);
667
668 /*
669 * Calculate size.
670 */
671 host->size = data->blocks * data->blksz;
672
673 /*
674 * Check timeout values for overflow.
675 * (Yes, some cards cause this value to overflow).
676 */
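/*
 * The data timeout is programmed into TAAC in whole milliseconds
 * (capped at 127) and into NSAC in clock cycles (capped at 255).
 */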
677 if (data->timeout_ns > 127000000)
678 wbsd_write_index(host, WBSD_IDX_TAAC, 127);
679 else {
680 wbsd_write_index(host, WBSD_IDX_TAAC,
681 data->timeout_ns / 1000000);
682 }
683
684 if (data->timeout_clks > 255)
685 wbsd_write_index(host, WBSD_IDX_NSAC, 255);
686 else
687 wbsd_write_index(host, WBSD_IDX_NSAC, data->timeout_clks);
688
689 /*
690 * Inform the chip of how large blocks will be
691 * sent. It needs this to determine when to
692 * calculate CRC.
693 *
694 * Space for CRC must be included in the size.
695 * Two bytes are needed for each data line.
696 */
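/*
 * For example, a 512 byte block is programmed as 512 + 2 = 514 bytes
 * on a 1-bit bus and 512 + 2 * 4 = 520 bytes on a 4-bit bus. The
 * upper size bits share WBSD_IDX_PBSMSB with the bus width flag.
 */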
697 if (host->bus_width == MMC_BUS_WIDTH_1) {
698 blksize = data->blksz + 2;
699
700 wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0);
701 wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
702 } else if (host->bus_width == MMC_BUS_WIDTH_4) {
703 blksize = data->blksz + 2 * 4;
704
705 wbsd_write_index(host, WBSD_IDX_PBSMSB,
706 ((blksize >> 4) & 0xF0) | WBSD_DATA_WIDTH);
707 wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF);
708 } else {
709 data->error = MMC_ERR_INVALID;
710 return;
711 }
712
713 /*
714 * Clear the FIFO. This is needed even for DMA
715 * transfers since the chip still uses the FIFO
716 * internally.
717 */
718 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
719 setup |= WBSD_FIFO_RESET;
720 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
721
722 /*
723 * DMA transfer?
724 */
725 if (host->dma >= 0) {
726 /*
727 * The buffer for DMA is only 64 kB.
728 */
729 BUG_ON(host->size > 0x10000);
730 if (host->size > 0x10000) {
731 data->error = MMC_ERR_INVALID;
732 return;
733 }
734
735 /*
736 * Transfer data from the SG list to
737 * the DMA buffer.
738 */
739 if (data->flags & MMC_DATA_WRITE)
740 wbsd_sg_to_dma(host, data);
741
742 /*
743 * Initialise the ISA DMA controller.
744 */
745 dmaflags = claim_dma_lock();
746 disable_dma(host->dma);
747 clear_dma_ff(host->dma);
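/*
 * Clearing bit 6 (0x40) of DMA_MODE_READ/WRITE switches the 8237
 * from single-mode to demand-mode transfers; this appears to be the
 * intent of the '& ~0x40' below.
 */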
748 if (data->flags & MMC_DATA_READ)
749 set_dma_mode(host->dma, DMA_MODE_READ & ~0x40);
750 else
751 set_dma_mode(host->dma, DMA_MODE_WRITE & ~0x40);
752 set_dma_addr(host->dma, host->dma_addr);
753 set_dma_count(host->dma, host->size);
754
755 enable_dma(host->dma);
756 release_dma_lock(dmaflags);
757
758 /*
759 * Enable DMA on the host.
760 */
761 wbsd_write_index(host, WBSD_IDX_DMA, WBSD_DMA_ENABLE);
762 } else {
763 /*
764 * This flag is used to keep printk
765 * output to a minimum.
766 */
767 host->firsterr = 1;
768
769 /*
770 * Initialise the SG list.
771 */
772 wbsd_init_sg(host, data);
773
774 /*
775 * Turn off DMA.
776 */
777 wbsd_write_index(host, WBSD_IDX_DMA, 0);
778
779 /*
780 * Set up FIFO threshold levels (and fill
781 * buffer if doing a write).
782 */
783 if (data->flags & MMC_DATA_READ) {
784 wbsd_write_index(host, WBSD_IDX_FIFOEN,
785 WBSD_FIFOEN_FULL | 8);
786 } else {
787 wbsd_write_index(host, WBSD_IDX_FIFOEN,
788 WBSD_FIFOEN_EMPTY | 8);
789 wbsd_fill_fifo(host);
790 }
791 }
792
793 data->error = MMC_ERR_NONE;
794 }
795
796 static void wbsd_finish_data(struct wbsd_host *host, struct mmc_data *data)
797 {
798 unsigned long dmaflags;
799 int count;
800 u8 status;
801
802 WARN_ON(host->mrq == NULL);
803
804 /*
805 * Send a stop command if needed.
806 */
807 if (data->stop)
808 wbsd_send_command(host, data->stop);
809
810 /*
811 * Wait for the controller to leave data
812 * transfer state.
813 */
814 do {
815 status = wbsd_read_index(host, WBSD_IDX_STATUS);
816 } while (status & (WBSD_BLOCK_READ | WBSD_BLOCK_WRITE));
817
818 /*
819 * DMA transfer?
820 */
821 if (host->dma >= 0) {
822 /*
823 * Disable DMA on the host.
824 */
825 wbsd_write_index(host, WBSD_IDX_DMA, 0);
826
827 /*
828 * Turn off the ISA DMA controller.
829 */
830 dmaflags = claim_dma_lock();
831 disable_dma(host->dma);
832 clear_dma_ff(host->dma);
833 count = get_dma_residue(host->dma);
834 release_dma_lock(dmaflags);
835
836 /*
837 * Any leftover data?
838 */
839 if (count) {
840 printk(KERN_ERR "%s: Incomplete DMA transfer. "
841 "%d bytes left.\n",
842 mmc_hostname(host->mmc), count);
843
844 data->error = MMC_ERR_FAILED;
845 } else {
846 /*
847 * Transfer data from DMA buffer to
848 * SG list.
849 */
850 if (data->flags & MMC_DATA_READ)
851 wbsd_dma_to_sg(host, data);
852
853 data->bytes_xfered = host->size;
854 }
855 }
856
857 DBGF("Ending data transfer (%d bytes)\n", data->bytes_xfered);
858
859 wbsd_request_end(host, host->mrq);
860 }
861
862 /*****************************************************************************\
863 * *
864 * MMC layer callbacks *
865 * *
866 \*****************************************************************************/
867
868 static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
869 {
870 struct wbsd_host *host = mmc_priv(mmc);
871 struct mmc_command *cmd;
872
873 /*
874 * Disable tasklets to avoid a deadlock.
875 */
876 spin_lock_bh(&host->lock);
877
878 BUG_ON(host->mrq != NULL);
879
880 cmd = mrq->cmd;
881
882 host->mrq = mrq;
883
884 /*
885 * If there is no card in the slot then
886 * time out immediately.
887 */
888 if (!(host->flags & WBSD_FCARD_PRESENT)) {
889 cmd->error = MMC_ERR_TIMEOUT;
890 goto done;
891 }
892
893 /*
894 * Does the request include data?
895 */
896 if (cmd->data) {
897 wbsd_prepare_data(host, cmd->data);
898
899 if (cmd->data->error != MMC_ERR_NONE)
900 goto done;
901 }
902
903 wbsd_send_command(host, cmd);
904
905 /*
906 * If this is a data transfer the request
907 * will be finished after the data has
908 * been transferred.
909 */
910 if (cmd->data && (cmd->error == MMC_ERR_NONE)) {
911 /*
912 * Dirty fix for hardware bug.
913 */
914 if (host->dma == -1)
915 tasklet_schedule(&host->fifo_tasklet);
916
917 spin_unlock_bh(&host->lock);
918
919 return;
920 }
921
922 done:
923 wbsd_request_end(host, mrq);
924
925 spin_unlock_bh(&host->lock);
926 }
927
928 static void wbsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
929 {
930 struct wbsd_host *host = mmc_priv(mmc);
931 u8 clk, setup, pwr;
932
933 spin_lock_bh(&host->lock);
934
935 /*
936 * Reset the chip on each power off.
937 * Should clear out any weird states.
938 */
939 if (ios->power_mode == MMC_POWER_OFF)
940 wbsd_init_device(host);
941
942 if (ios->clock >= 24000000)
943 clk = WBSD_CLK_24M;
944 else if (ios->clock >= 16000000)
945 clk = WBSD_CLK_16M;
946 else if (ios->clock >= 12000000)
947 clk = WBSD_CLK_12M;
948 else
949 clk = WBSD_CLK_375K;
950
951 /*
952 * Only write to the clock register when
953 * there is an actual change.
954 */
955 if (clk != host->clk) {
956 wbsd_write_index(host, WBSD_IDX_CLK, clk);
957 host->clk = clk;
958 }
959
960 /*
961 * Power up card.
962 */
963 if (ios->power_mode != MMC_POWER_OFF) {
964 pwr = inb(host->base + WBSD_CSR);
965 pwr &= ~WBSD_POWER_N;
966 outb(pwr, host->base + WBSD_CSR);
967 }
968
969 /*
970 * MMC cards need to have pin 1 high during init.
971 * It wreaks havoc with the card detection though so
972 * that needs to be disabled.
973 */
974 setup = wbsd_read_index(host, WBSD_IDX_SETUP);
975 if (ios->chip_select == MMC_CS_HIGH) {
976 BUG_ON(ios->bus_width != MMC_BUS_WIDTH_1);
977 setup |= WBSD_DAT3_H;
978 host->flags |= WBSD_FIGNORE_DETECT;
979 } else {
980 if (setup & WBSD_DAT3_H) {
981 setup &= ~WBSD_DAT3_H;
982
983 /*
984 * We cannot resume card detection immediately
985 * because of capacitance and delays in the chip.
986 */
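/* Re-enable card detection after roughly 10 ms (HZ / 100 jiffies). */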
987 mod_timer(&host->ignore_timer, jiffies + HZ / 100);
988 }
989 }
990 wbsd_write_index(host, WBSD_IDX_SETUP, setup);
991
992 /*
993 * Store bus width for later. Will be used when
994 * setting up the data transfer.
995 */
996 host->bus_width = ios->bus_width;
997
998 spin_unlock_bh(&host->lock);
999 }
1000
1001 static int wbsd_get_ro(struct mmc_host *mmc)
1002 {
1003 struct wbsd_host *host = mmc_priv(mmc);
1004 u8 csr;
1005
1006 spin_lock_bh(&host->lock);
1007
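/*
 * The write protect state (WBSD_WRPT) is sampled while the MSLED
 * signal is briefly driven; the reason for the MSLED pulse is not
 * documented here.
 */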
1008 csr = inb(host->base + WBSD_CSR);
1009 csr |= WBSD_MSLED;
1010 outb(csr, host->base + WBSD_CSR);
1011
1012 mdelay(1);
1013
1014 csr = inb(host->base + WBSD_CSR);
1015 csr &= ~WBSD_MSLED;
1016 outb(csr, host->base + WBSD_CSR);
1017
1018 spin_unlock_bh(&host->lock);
1019
1020 return csr & WBSD_WRPT;
1021 }
1022
1023 static struct mmc_host_ops wbsd_ops = {
1024 .request = wbsd_request,
1025 .set_ios = wbsd_set_ios,
1026 .get_ro = wbsd_get_ro,
1027 };
1028
1029 /*****************************************************************************\
1030 * *
1031 * Interrupt handling *
1032 * *
1033 \*****************************************************************************/
1034
1035 /*
1036 * Helper function to reset detection ignore
1037 */
1038
1039 static void wbsd_reset_ignore(unsigned long data)
1040 {
1041 struct wbsd_host *host = (struct wbsd_host *)data;
1042
1043 BUG_ON(host == NULL);
1044
1045 DBG("Resetting card detection ignore\n");
1046
1047 spin_lock_bh(&host->lock);
1048
1049 host->flags &= ~WBSD_FIGNORE_DETECT;
1050
1051 /*
1052 * Card status might have changed during the
1053 * blackout.
1054 */
1055 tasklet_schedule(&host->card_tasklet);
1056
1057 spin_unlock_bh(&host->lock);
1058 }
1059
1060 /*
1061 * Tasklets
1062 */
1063
1064 static inline struct mmc_data *wbsd_get_data(struct wbsd_host *host)
1065 {
1066 WARN_ON(!host->mrq);
1067 if (!host->mrq)
1068 return NULL;
1069
1070 WARN_ON(!host->mrq->cmd);
1071 if (!host->mrq->cmd)
1072 return NULL;
1073
1074 WARN_ON(!host->mrq->cmd->data);
1075 if (!host->mrq->cmd->data)
1076 return NULL;
1077
1078 return host->mrq->cmd->data;
1079 }
1080
1081 static void wbsd_tasklet_card(unsigned long param)
1082 {
1083 struct wbsd_host *host = (struct wbsd_host *)param;
1084 u8 csr;
1085 int delay = -1;
1086
1087 spin_lock(&host->lock);
1088
1089 if (host->flags & WBSD_FIGNORE_DETECT) {
1090 spin_unlock(&host->lock);
1091 return;
1092 }
1093
1094 csr = inb(host->base + WBSD_CSR);
1095 WARN_ON(csr == 0xff);
1096
1097 if (csr & WBSD_CARDPRESENT) {
1098 if (!(host->flags & WBSD_FCARD_PRESENT)) {
1099 DBG("Card inserted\n");
1100 host->flags |= WBSD_FCARD_PRESENT;
1101
1102 delay = 500;
1103 }
1104 } else if (host->flags & WBSD_FCARD_PRESENT) {
1105 DBG("Card removed\n");
1106 host->flags &= ~WBSD_FCARD_PRESENT;
1107
1108 if (host->mrq) {
1109 printk(KERN_ERR "%s: Card removed during transfer!\n",
1110 mmc_hostname(host->mmc));
1111 wbsd_reset(host);
1112
1113 host->mrq->cmd->error = MMC_ERR_FAILED;
1114 tasklet_schedule(&host->finish_tasklet);
1115 }
1116
1117 delay = 0;
1118 }
1119
1120 /*
1121 * Unlock first since we might get a call back.
1122 */
1123
1124 spin_unlock(&host->lock);
1125
1126 if (delay != -1)
1127 mmc_detect_change(host->mmc, msecs_to_jiffies(delay));
1128 }
1129
1130 static void wbsd_tasklet_fifo(unsigned long param)
1131 {
1132 struct wbsd_host *host = (struct wbsd_host *)param;
1133 struct mmc_data *data;
1134
1135 spin_lock(&host->lock);
1136
1137 if (!host->mrq)
1138 goto end;
1139
1140 data = wbsd_get_data(host);
1141 if (!data)
1142 goto end;
1143
1144 if (data->flags & MMC_DATA_WRITE)
1145 wbsd_fill_fifo(host);
1146 else
1147 wbsd_empty_fifo(host);
1148
1149 /*
1150 * Done?
1151 */
1152 if (host->size == data->bytes_xfered) {
1153 wbsd_write_index(host, WBSD_IDX_FIFOEN, 0);
1154 tasklet_schedule(&host->finish_tasklet);
1155 }
1156
1157 end:
1158 spin_unlock(&host->lock);
1159 }
1160
1161 static void wbsd_tasklet_crc(unsigned long param)
1162 {
1163 struct wbsd_host *host = (struct wbsd_host *)param;
1164 struct mmc_data *data;
1165
1166 spin_lock(&host->lock);
1167
1168 if (!host->mrq)
1169 goto end;
1170
1171 data = wbsd_get_data(host);
1172 if (!data)
1173 goto end;
1174
1175 DBGF("CRC error\n");
1176
1177 data->error = MMC_ERR_BADCRC;
1178
1179 tasklet_schedule(&host->finish_tasklet);
1180
1181 end:
1182 spin_unlock(&host->lock);
1183 }
1184
1185 static void wbsd_tasklet_timeout(unsigned long param)
1186 {
1187 struct wbsd_host *host = (struct wbsd_host *)param;
1188 struct mmc_data *data;
1189
1190 spin_lock(&host->lock);
1191
1192 if (!host->mrq)
1193 goto end;
1194
1195 data = wbsd_get_data(host);
1196 if (!data)
1197 goto end;
1198
1199 DBGF("Timeout\n");
1200
1201 data->error = MMC_ERR_TIMEOUT;
1202
1203 tasklet_schedule(&host->finish_tasklet);
1204
1205 end:
1206 spin_unlock(&host->lock);
1207 }
1208
1209 static void wbsd_tasklet_finish(unsigned long param)
1210 {
1211 struct wbsd_host *host = (struct wbsd_host *)param;
1212 struct mmc_data *data;
1213
1214 spin_lock(&host->lock);
1215
1216 WARN_ON(!host->mrq);
1217 if (!host->mrq)
1218 goto end;
1219
1220 data = wbsd_get_data(host);
1221 if (!data)
1222 goto end;
1223
1224 wbsd_finish_data(host, data);
1225
1226 end:
1227 spin_unlock(&host->lock);
1228 }
1229
1230 static void wbsd_tasklet_block(unsigned long param)
1231 {
1232 struct wbsd_host *host = (struct wbsd_host *)param;
1233 struct mmc_data *data;
1234
1235 spin_lock(&host->lock);
1236
1237 if ((wbsd_read_index(host, WBSD_IDX_CRCSTATUS) & WBSD_CRC_MASK) !=
1238 WBSD_CRC_OK) {
1239 data = wbsd_get_data(host);
1240 if (!data)
1241 goto end;
1242
1243 DBGF("CRC error\n");
1244
1245 data->error = MMC_ERR_BADCRC;
1246
1247 tasklet_schedule(&host->finish_tasklet);
1248 }
1249
1250 end:
1251 spin_unlock(&host->lock);
1252 }
1253
1254 /*
1255 * Interrupt handling
1256 */
1257
1258 static irqreturn_t wbsd_irq(int irq, void *dev_id, struct pt_regs *regs)
1259 {
1260 struct wbsd_host *host = dev_id;
1261 int isr;
1262
1263 isr = inb(host->base + WBSD_ISR);
1264
1265 /*
1266 * Was it actually our hardware that caused the interrupt?
1267 */
1268 if (isr == 0xff || isr == 0x00)
1269 return IRQ_NONE;
1270
1271 host->isr |= isr;
1272
1273 /*
1274 * Schedule tasklets as needed.
1275 */
1276 if (isr & WBSD_INT_CARD)
1277 tasklet_schedule(&host->card_tasklet);
1278 if (isr & WBSD_INT_FIFO_THRE)
1279 tasklet_schedule(&host->fifo_tasklet);
1280 if (isr & WBSD_INT_CRC)
1281 tasklet_hi_schedule(&host->crc_tasklet);
1282 if (isr & WBSD_INT_TIMEOUT)
1283 tasklet_hi_schedule(&host->timeout_tasklet);
1284 if (isr & WBSD_INT_BUSYEND)
1285 tasklet_hi_schedule(&host->block_tasklet);
1286 if (isr & WBSD_INT_TC)
1287 tasklet_schedule(&host->finish_tasklet);
1288
1289 return IRQ_HANDLED;
1290 }
1291
1292 /*****************************************************************************\
1293 * *
1294 * Device initialisation and shutdown *
1295 * *
1296 \*****************************************************************************/
1297
1298 /*
1299 * Allocate/free MMC structure.
1300 */
1301
1302 static int __devinit wbsd_alloc_mmc(struct device *dev)
1303 {
1304 struct mmc_host *mmc;
1305 struct wbsd_host *host;
1306
1307 /*
1308 * Allocate MMC structure.
1309 */
1310 mmc = mmc_alloc_host(sizeof(struct wbsd_host), dev);
1311 if (!mmc)
1312 return -ENOMEM;
1313
1314 host = mmc_priv(mmc);
1315 host->mmc = mmc;
1316
1317 host->dma = -1;
1318
1319 /*
1320 * Set host parameters.
1321 */
1322 mmc->ops = &wbsd_ops;
1323 mmc->f_min = 375000;
1324 mmc->f_max = 24000000;
1325 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1326 mmc->caps = MMC_CAP_4_BIT_DATA;
1327
1328 spin_lock_init(&host->lock);
1329
1330 /*
1331 * Set up timers
1332 */
1333 init_timer(&host->ignore_timer);
1334 host->ignore_timer.data = (unsigned long)host;
1335 host->ignore_timer.function = wbsd_reset_ignore;
1336
1337 /*
1338 * Maximum number of segments. Worst case is one sector per segment
1339 * so this will be 64kB/512.
1340 */
1341 mmc->max_hw_segs = 128;
1342 mmc->max_phys_segs = 128;
1343
1344 /*
1345 * Maximum number of sectors in one transfer. Also limited by 64kB
1346 * buffer.
1347 */
1348 mmc->max_sectors = 128;
1349
1350 /*
1351 * Maximum segment size. Could be one segment with the maximum number
1352 * of segments.
1353 */
1354 mmc->max_seg_size = mmc->max_sectors * 512;
1355
1356 dev_set_drvdata(dev, mmc);
1357
1358 return 0;
1359 }
1360
1361 static void __devexit wbsd_free_mmc(struct device *dev)
1362 {
1363 struct mmc_host *mmc;
1364 struct wbsd_host *host;
1365
1366 mmc = dev_get_drvdata(dev);
1367 if (!mmc)
1368 return;
1369
1370 host = mmc_priv(mmc);
1371 BUG_ON(host == NULL);
1372
1373 del_timer_sync(&host->ignore_timer);
1374
1375 mmc_free_host(mmc);
1376
1377 dev_set_drvdata(dev, NULL);
1378 }
1379
1380 /*
1381 * Scan for known chip IDs
1382 */
1383
1384 static int __devinit wbsd_scan(struct wbsd_host *host)
1385 {
1386 int i, j, k;
1387 int id;
1388
1389 /*
1390 * Iterate through all ports, all codes to
1391 * find hardware that is in our known list.
1392 */
1393 for (i = 0; i < ARRAY_SIZE(config_ports); i++) {
1394 if (!request_region(config_ports[i], 2, DRIVER_NAME))
1395 continue;
1396
1397 for (j = 0; j < ARRAY_SIZE(unlock_codes); j++) {
1398 id = 0xFFFF;
1399
1400 host->config = config_ports[i];
1401 host->unlock_code = unlock_codes[j];
1402
1403 wbsd_unlock_config(host);
1404
1405 outb(WBSD_CONF_ID_HI, config_ports[i]);
1406 id = inb(config_ports[i] + 1) << 8;
1407
1408 outb(WBSD_CONF_ID_LO, config_ports[i]);
1409 id |= inb(config_ports[i] + 1);
1410
1411 wbsd_lock_config(host);
1412
1413 for (k = 0; k < ARRAY_SIZE(valid_ids); k++) {
1414 if (id == valid_ids[k]) {
1415 host->chip_id = id;
1416
1417 return 0;
1418 }
1419 }
1420
1421 if (id != 0xFFFF) {
1422 DBG("Unknown hardware (id %x) found at %x\n",
1423 id, config_ports[i]);
1424 }
1425 }
1426
1427 release_region(config_ports[i], 2);
1428 }
1429
1430 host->config = 0;
1431 host->unlock_code = 0;
1432
1433 return -ENODEV;
1434 }
1435
1436 /*
1437 * Allocate/free io port ranges
1438 */
1439
1440 static int __devinit wbsd_request_region(struct wbsd_host *host, int base)
1441 {
1442 if (base & 0x7)
1443 return -EINVAL;
1444
1445 if (!request_region(base, 8, DRIVER_NAME))
1446 return -EIO;
1447
1448 host->base = base;
1449
1450 return 0;
1451 }
1452
1453 static void __devexit wbsd_release_regions(struct wbsd_host *host)
1454 {
1455 if (host->base)
1456 release_region(host->base, 8);
1457
1458 host->base = 0;
1459
1460 if (host->config)
1461 release_region(host->config, 2);
1462
1463 host->config = 0;
1464 }
1465
1466 /*
1467 * Allocate/free DMA port and buffer
1468 */
1469
1470 static void __devinit wbsd_request_dma(struct wbsd_host *host, int dma)
1471 {
1472 if (dma < 0)
1473 return;
1474
1475 if (request_dma(dma, DRIVER_NAME))
1476 goto err;
1477
1478 /*
1479 * We need to allocate a special buffer in
1480 * order for ISA to be able to DMA to it.
1481 */
1482 host->dma_buffer = kmalloc(WBSD_DMA_SIZE,
1483 GFP_NOIO | GFP_DMA | __GFP_REPEAT | __GFP_NOWARN);
1484 if (!host->dma_buffer)
1485 goto free;
1486
1487 /*
1488 * Translate the address to a physical address.
1489 */
1490 host->dma_addr = dma_map_single(host->mmc->dev, host->dma_buffer,
1491 WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
1492
1493 /*
1494 * ISA DMA must be aligned on a 64k basis.
1495 */
1496 if ((host->dma_addr & 0xffff) != 0)
1497 goto kfree;
1498 /*
1499 * ISA cannot access memory above 16 MB.
1500 */
1501 else if (host->dma_addr >= 0x1000000)
1502 goto kfree;
1503
1504 host->dma = dma;
1505
1506 return;
1507
1508 kfree:
1509 /*
1510 * If we've gotten here then there is some kind of alignment bug
1511 */
1512 BUG_ON(1);
1513
1514 dma_unmap_single(host->mmc->dev, host->dma_addr,
1515 WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
1516 host->dma_addr = (dma_addr_t)NULL;
1517
1518 kfree(host->dma_buffer);
1519 host->dma_buffer = NULL;
1520
1521 free:
1522 free_dma(dma);
1523
1524 err:
1525 printk(KERN_WARNING DRIVER_NAME ": Unable to allocate DMA %d. "
1526 "Falling back on FIFO.\n", dma);
1527 }
1528
1529 static void __devexit wbsd_release_dma(struct wbsd_host *host)
1530 {
1531 if (host->dma_addr) {
1532 dma_unmap_single(host->mmc->dev, host->dma_addr,
1533 WBSD_DMA_SIZE, DMA_BIDIRECTIONAL);
1534 }
1535 kfree(host->dma_buffer);
1536 if (host->dma >= 0)
1537 free_dma(host->dma);
1538
1539 host->dma = -1;
1540 host->dma_buffer = NULL;
1541 host->dma_addr = (dma_addr_t)NULL;
1542 }
1543
1544 /*
1545 * Allocate/free IRQ.
1546 */
1547
1548 static int __devinit wbsd_request_irq(struct wbsd_host *host, int irq)
1549 {
1550 int ret;
1551
1552 /*
1553 * Allocate interrupt.
1554 */
1555
1556 ret = request_irq(irq, wbsd_irq, IRQF_SHARED, DRIVER_NAME, host);
1557 if (ret)
1558 return ret;
1559
1560 host->irq = irq;
1561
1562 /*
1563 * Set up tasklets.
1564 */
1565 tasklet_init(&host->card_tasklet, wbsd_tasklet_card,
1566 (unsigned long)host);
1567 tasklet_init(&host->fifo_tasklet, wbsd_tasklet_fifo,
1568 (unsigned long)host);
1569 tasklet_init(&host->crc_tasklet, wbsd_tasklet_crc,
1570 (unsigned long)host);
1571 tasklet_init(&host->timeout_tasklet, wbsd_tasklet_timeout,
1572 (unsigned long)host);
1573 tasklet_init(&host->finish_tasklet, wbsd_tasklet_finish,
1574 (unsigned long)host);
1575 tasklet_init(&host->block_tasklet, wbsd_tasklet_block,
1576 (unsigned long)host);
1577
1578 return 0;
1579 }
1580
1581 static void __devexit wbsd_release_irq(struct wbsd_host *host)
1582 {
1583 if (!host->irq)
1584 return;
1585
1586 free_irq(host->irq, host);
1587
1588 host->irq = 0;
1589
1590 tasklet_kill(&host->card_tasklet);
1591 tasklet_kill(&host->fifo_tasklet);
1592 tasklet_kill(&host->crc_tasklet);
1593 tasklet_kill(&host->timeout_tasklet);
1594 tasklet_kill(&host->finish_tasklet);
1595 tasklet_kill(&host->block_tasklet);
1596 }
1597
1598 /*
1599 * Allocate all resources for the host.
1600 */
1601
1602 static int __devinit wbsd_request_resources(struct wbsd_host *host,
1603 int base, int irq, int dma)
1604 {
1605 int ret;
1606
1607 /*
1608 * Allocate I/O ports.
1609 */
1610 ret = wbsd_request_region(host, base);
1611 if (ret)
1612 return ret;
1613
1614 /*
1615 * Allocate interrupt.
1616 */
1617 ret = wbsd_request_irq(host, irq);
1618 if (ret)
1619 return ret;
1620
1621 /*
1622 * Allocate DMA.
1623 */
1624 wbsd_request_dma(host, dma);
1625
1626 return 0;
1627 }
1628
1629 /*
1630 * Release all resources for the host.
1631 */
1632
1633 static void __devexit wbsd_release_resources(struct wbsd_host *host)
1634 {
1635 wbsd_release_dma(host);
1636 wbsd_release_irq(host);
1637 wbsd_release_regions(host);
1638 }
1639
1640 /*
1641 * Configure the resources the chip should use.
1642 */
1643
1644 static void wbsd_chip_config(struct wbsd_host *host)
1645 {
1646 wbsd_unlock_config(host);
1647
1648 /*
1649 * Reset the chip.
1650 */
1651 wbsd_write_config(host, WBSD_CONF_SWRST, 1);
1652 wbsd_write_config(host, WBSD_CONF_SWRST, 0);
1653
1654 /*
1655 * Select SD/MMC function.
1656 */
1657 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
1658
1659 /*
1660 * Set up card detection.
1661 */
1662 wbsd_write_config(host, WBSD_CONF_PINS, WBSD_PINS_DETECT_GP11);
1663
1664 /*
1665 * Configure chip
1666 */
1667 wbsd_write_config(host, WBSD_CONF_PORT_HI, host->base >> 8);
1668 wbsd_write_config(host, WBSD_CONF_PORT_LO, host->base & 0xff);
1669
1670 wbsd_write_config(host, WBSD_CONF_IRQ, host->irq);
1671
1672 if (host->dma >= 0)
1673 wbsd_write_config(host, WBSD_CONF_DRQ, host->dma);
1674
1675 /*
1676 * Enable and power up chip.
1677 */
1678 wbsd_write_config(host, WBSD_CONF_ENABLE, 1);
1679 wbsd_write_config(host, WBSD_CONF_POWER, 0x20);
1680
1681 wbsd_lock_config(host);
1682 }
1683
1684 /*
1685 * Check that configured resources are correct.
1686 */
1687
1688 static int wbsd_chip_validate(struct wbsd_host *host)
1689 {
1690 int base, irq, dma;
1691
1692 wbsd_unlock_config(host);
1693
1694 /*
1695 * Select SD/MMC function.
1696 */
1697 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
1698
1699 /*
1700 * Read configuration.
1701 */
1702 base = wbsd_read_config(host, WBSD_CONF_PORT_HI) << 8;
1703 base |= wbsd_read_config(host, WBSD_CONF_PORT_LO);
1704
1705 irq = wbsd_read_config(host, WBSD_CONF_IRQ);
1706
1707 dma = wbsd_read_config(host, WBSD_CONF_DRQ);
1708
1709 wbsd_lock_config(host);
1710
1711 /*
1712 * Validate against given configuration.
1713 */
1714 if (base != host->base)
1715 return 0;
1716 if (irq != host->irq)
1717 return 0;
1718 if ((dma != host->dma) && (host->dma != -1))
1719 return 0;
1720
1721 return 1;
1722 }
1723
1724 /*
1725 * Powers down the SD function
1726 */
1727
1728 static void wbsd_chip_poweroff(struct wbsd_host *host)
1729 {
1730 wbsd_unlock_config(host);
1731
1732 wbsd_write_config(host, WBSD_CONF_DEVICE, DEVICE_SD);
1733 wbsd_write_config(host, WBSD_CONF_ENABLE, 0);
1734
1735 wbsd_lock_config(host);
1736 }
1737
1738 /*****************************************************************************\
1739 * *
1740 * Devices setup and shutdown *
1741 * *
1742 \*****************************************************************************/
1743
1744 static int __devinit wbsd_init(struct device *dev, int base, int irq, int dma,
1745 int pnp)
1746 {
1747 struct wbsd_host *host = NULL;
1748 struct mmc_host *mmc = NULL;
1749 int ret;
1750
1751 ret = wbsd_alloc_mmc(dev);
1752 if (ret)
1753 return ret;
1754
1755 mmc = dev_get_drvdata(dev);
1756 host = mmc_priv(mmc);
1757
1758 /*
1759 * Scan for hardware.
1760 */
1761 ret = wbsd_scan(host);
1762 if (ret) {
1763 if (pnp && (ret == -ENODEV)) {
1764 printk(KERN_WARNING DRIVER_NAME
1765 ": Unable to confirm device presence. You may "
1766 "experience lock-ups.\n");
1767 } else {
1768 wbsd_free_mmc(dev);
1769 return ret;
1770 }
1771 }
1772
1773 /*
1774 * Request resources.
1775 */
1776 ret = wbsd_request_resources(host, base, irq, dma);
1777 if (ret) {
1778 wbsd_release_resources(host);
1779 wbsd_free_mmc(dev);
1780 return ret;
1781 }
1782
1783 /*
1784 * See if chip needs to be configured.
1785 */
1786 if (pnp) {
1787 if ((host->config != 0) && !wbsd_chip_validate(host)) {
1788 printk(KERN_WARNING DRIVER_NAME
1789 ": PnP active but chip not configured! "
1790 "You probably have a buggy BIOS. "
1791 "Configuring chip manually.\n");
1792 wbsd_chip_config(host);
1793 }
1794 } else
1795 wbsd_chip_config(host);
1796
1797 /*
1798 * Power Management stuff. No idea how this works.
1799 * Not tested.
1800 */
1801 #ifdef CONFIG_PM
1802 if (host->config) {
1803 wbsd_unlock_config(host);
1804 wbsd_write_config(host, WBSD_CONF_PME, 0xA0);
1805 wbsd_lock_config(host);
1806 }
1807 #endif
1808 /*
1809 * Allow device to initialise itself properly.
1810 */
1811 mdelay(5);
1812
1813 /*
1814 * Reset the chip into a known state.
1815 */
1816 wbsd_init_device(host);
1817
1818 mmc_add_host(mmc);
1819
1820 printk(KERN_INFO "%s: W83L51xD", mmc_hostname(mmc));
1821 if (host->chip_id != 0)
1822 printk(" id %x", (int)host->chip_id);
1823 printk(" at 0x%x irq %d", (int)host->base, (int)host->irq);
1824 if (host->dma >= 0)
1825 printk(" dma %d", (int)host->dma);
1826 else
1827 printk(" FIFO");
1828 if (pnp)
1829 printk(" PnP");
1830 printk("\n");
1831
1832 return 0;
1833 }
1834
1835 static void __devexit wbsd_shutdown(struct device *dev, int pnp)
1836 {
1837 struct mmc_host *mmc = dev_get_drvdata(dev);
1838 struct wbsd_host *host;
1839
1840 if (!mmc)
1841 return;
1842
1843 host = mmc_priv(mmc);
1844
1845 mmc_remove_host(mmc);
1846
1847 /*
1848 * Power down the SD/MMC function.
1849 */
1850 if (!pnp)
1851 wbsd_chip_poweroff(host);
1852
1853 wbsd_release_resources(host);
1854
1855 wbsd_free_mmc(dev);
1856 }
1857
1858 /*
1859 * Non-PnP
1860 */
1861
1862 static int __devinit wbsd_probe(struct platform_device *dev)
1863 {
1864 return wbsd_init(&dev->dev, io, irq, dma, 0);
1865 }
1866
1867 static int __devexit wbsd_remove(struct platform_device *dev)
1868 {
1869 wbsd_shutdown(&dev->dev, 0);
1870
1871 return 0;
1872 }
1873
1874 /*
1875 * PnP
1876 */
1877
1878 #ifdef CONFIG_PNP
1879
1880 static int __devinit
1881 wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id)
1882 {
1883 int io, irq, dma;
1884
1885 /*
1886 * Get resources from PnP layer.
1887 */
1888 io = pnp_port_start(pnpdev, 0);
1889 irq = pnp_irq(pnpdev, 0);
1890 if (pnp_dma_valid(pnpdev, 0))
1891 dma = pnp_dma(pnpdev, 0);
1892 else
1893 dma = -1;
1894
1895 DBGF("PnP resources: port %3x irq %d dma %d\n", io, irq, dma);
1896
1897 return wbsd_init(&pnpdev->dev, io, irq, dma, 1);
1898 }
1899
1900 static void __devexit wbsd_pnp_remove(struct pnp_dev *dev)
1901 {
1902 wbsd_shutdown(&dev->dev, 1);
1903 }
1904
1905 #endif /* CONFIG_PNP */
1906
1907 /*
1908 * Power management
1909 */
1910
1911 #ifdef CONFIG_PM
1912
1913 static int wbsd_suspend(struct wbsd_host *host, pm_message_t state)
1914 {
1915 BUG_ON(host == NULL);
1916
1917 return mmc_suspend_host(host->mmc, state);
1918 }
1919
1920 static int wbsd_resume(struct wbsd_host *host)
1921 {
1922 BUG_ON(host == NULL);
1923
1924 wbsd_init_device(host);
1925
1926 return mmc_resume_host(host->mmc);
1927 }
1928
1929 static int wbsd_platform_suspend(struct platform_device *dev,
1930 pm_message_t state)
1931 {
1932 struct mmc_host *mmc = platform_get_drvdata(dev);
1933 struct wbsd_host *host;
1934 int ret;
1935
1936 if (mmc == NULL)
1937 return 0;
1938
1939 DBGF("Suspending...\n");
1940
1941 host = mmc_priv(mmc);
1942
1943 ret = wbsd_suspend(host, state);
1944 if (ret)
1945 return ret;
1946
1947 wbsd_chip_poweroff(host);
1948
1949 return 0;
1950 }
1951
1952 static int wbsd_platform_resume(struct platform_device *dev)
1953 {
1954 struct mmc_host *mmc = platform_get_drvdata(dev);
1955 struct wbsd_host *host;
1956
1957 if (mmc == NULL)
1958 return 0;
1959
1960 DBGF("Resuming...\n");
1961
1962 host = mmc_priv(mmc);
1963
1964 wbsd_chip_config(host);
1965
1966 /*
1967 * Allow device to initialise itself properly.
1968 */
1969 mdelay(5);
1970
1971 return wbsd_resume(host);
1972 }
1973
1974 #ifdef CONFIG_PNP
1975
1976 static int wbsd_pnp_suspend(struct pnp_dev *pnp_dev, pm_message_t state)
1977 {
1978 struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev);
1979 struct wbsd_host *host;
1980
1981 if (mmc == NULL)
1982 return 0;
1983
1984 DBGF("Suspending...\n");
1985
1986 host = mmc_priv(mmc);
1987
1988 return wbsd_suspend(host, state);
1989 }
1990
1991 static int wbsd_pnp_resume(struct pnp_dev *pnp_dev)
1992 {
1993 struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev);
1994 struct wbsd_host *host;
1995
1996 if (mmc == NULL)
1997 return 0;
1998
1999 DBGF("Resuming...\n");
2000
2001 host = mmc_priv(mmc);
2002
2003 /*
2004 * See if chip needs to be configured.
2005 */
2006 if (host->config != 0) {
2007 if (!wbsd_chip_validate(host)) {
2008 printk(KERN_WARNING DRIVER_NAME
2009 ": PnP active but chip not configured! "
2010 "You probably have a buggy BIOS. "
2011 "Configuring chip manually.\n");
2012 wbsd_chip_config(host);
2013 }
2014 }
2015
2016 /*
2017 * Allow device to initialise itself properly.
2018 */
2019 mdelay(5);
2020
2021 return wbsd_resume(host);
2022 }
2023
2024 #endif /* CONFIG_PNP */
2025
2026 #else /* CONFIG_PM */
2027
2028 #define wbsd_platform_suspend NULL
2029 #define wbsd_platform_resume NULL
2030
2031 #define wbsd_pnp_suspend NULL
2032 #define wbsd_pnp_resume NULL
2033
2034 #endif /* CONFIG_PM */
2035
2036 static struct platform_device *wbsd_device;
2037
2038 static struct platform_driver wbsd_driver = {
2039 .probe = wbsd_probe,
2040 .remove = __devexit_p(wbsd_remove),
2041
2042 .suspend = wbsd_platform_suspend,
2043 .resume = wbsd_platform_resume,
2044 .driver = {
2045 .name = DRIVER_NAME,
2046 },
2047 };
2048
2049 #ifdef CONFIG_PNP
2050
2051 static struct pnp_driver wbsd_pnp_driver = {
2052 .name = DRIVER_NAME,
2053 .id_table = pnp_dev_table,
2054 .probe = wbsd_pnp_probe,
2055 .remove = __devexit_p(wbsd_pnp_remove),
2056
2057 .suspend = wbsd_pnp_suspend,
2058 .resume = wbsd_pnp_resume,
2059 };
2060
2061 #endif /* CONFIG_PNP */
2062
2063 /*
2064 * Module loading/unloading
2065 */
2066
2067 static int __init wbsd_drv_init(void)
2068 {
2069 int result;
2070
2071 printk(KERN_INFO DRIVER_NAME
2072 ": Winbond W83L51xD SD/MMC card interface driver, "
2073 DRIVER_VERSION "\n");
2074 printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
2075
2076 #ifdef CONFIG_PNP
2077
2078 if (!nopnp) {
2079 result = pnp_register_driver(&wbsd_pnp_driver);
2080 if (result < 0)
2081 return result;
2082 }
2083 #endif /* CONFIG_PNP */
2084
2085 if (nopnp) {
2086 result = platform_driver_register(&wbsd_driver);
2087 if (result < 0)
2088 return result;
2089
2090 wbsd_device = platform_device_alloc(DRIVER_NAME, -1);
2091 if (!wbsd_device) {
2092 platform_driver_unregister(&wbsd_driver);
2093 return -ENOMEM;
2094 }
2095
2096 result = platform_device_add(wbsd_device);
2097 if (result) {
2098 platform_device_put(wbsd_device);
2099 platform_driver_unregister(&wbsd_driver);
2100 return result;
2101 }
2102 }
2103
2104 return 0;
2105 }
2106
2107 static void __exit wbsd_drv_exit(void)
2108 {
2109 #ifdef CONFIG_PNP
2110
2111 if (!nopnp)
2112 pnp_unregister_driver(&wbsd_pnp_driver);
2113
2114 #endif /* CONFIG_PNP */
2115
2116 if (nopnp) {
2117 platform_device_unregister(wbsd_device);
2118
2119 platform_driver_unregister(&wbsd_driver);
2120 }
2121
2122 DBG("unloaded\n");
2123 }
2124
2125 module_init(wbsd_drv_init);
2126 module_exit(wbsd_drv_exit);
2127 #ifdef CONFIG_PNP
2128 module_param(nopnp, uint, 0444);
2129 #endif
2130 module_param(io, uint, 0444);
2131 module_param(irq, uint, 0444);
2132 module_param(dma, int, 0444);
2133
2134 MODULE_LICENSE("GPL");
2135 MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>");
2136 MODULE_DESCRIPTION("Winbond W83L51xD SD/MMC card interface driver");
2137 MODULE_VERSION(DRIVER_VERSION);
2138
2139 #ifdef CONFIG_PNP
2140 MODULE_PARM_DESC(nopnp, "Scan for device instead of relying on PNP. (default 0)");
2141 #endif
2142 MODULE_PARM_DESC(io, "I/O base to allocate. Must be 8 byte aligned. (default 0x248)");
2143 MODULE_PARM_DESC(irq, "IRQ to allocate. (default 6)");
2144 MODULE_PARM_DESC(dma, "DMA channel to allocate. -1 for no DMA. (default 2)");