1 /*
2 * sata_sx4.c - Promise SATA
3 *
4 * Maintained by: Tejun Heo <tj@kernel.org>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * Hardware documentation available under NDA.
30 *
31 */
32
33 /*
34 Theory of operation
35 -------------------
36
37 The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy
38 engine, DIMM memory, and four ATA engines (one per SATA port).
39 Data is copied to/from DIMM memory by the HDMA engine, before
40 handing off to one (or more) of the ATA engines. The ATA
41 engines operate solely on DIMM memory.
42
43 The SX4 behaves like a PATA chip, with no SATA controls or
44 knowledge whatsoever, leading to the presumption that
45 PATA<->SATA bridges exist on SX4 boards, external to the
46 PDC20621 chip itself.
47
48 The chip is quite capable, supporting an XOR engine and linked
49 * hardware commands (permits a string of transactions to be
50 submitted and waited-on as a single unit), and an optional
51 microprocessor.
52
53 The limiting factor is largely software. This Linux driver was
54 written to multiplex the single HDMA engine to copy disk
55 transactions into a fixed DIMM memory space, from where an ATA
56 engine takes over. As a result, each WRITE looks like this:
57
58 submit HDMA packet to hardware
59 hardware copies data from system memory to DIMM
60 hardware raises interrupt
61
62 submit ATA packet to hardware
63 hardware executes ATA WRITE command, w/ data in DIMM
64 hardware raises interrupt
65
66 and each READ looks like this:
67
68 submit ATA packet to hardware
69 hardware executes ATA READ command, w/ data in DIMM
70 hardware raises interrupt
71
72 submit HDMA packet to hardware
73 hardware copies data from DIMM to system memory
74 hardware raises interrupt
75
76 This is a very slow, lock-step way of doing things that can
77 certainly be improved by motivated kernel hackers.
78
79 */
80
81 #include <linux/kernel.h>
82 #include <linux/module.h>
83 #include <linux/pci.h>
84 #include <linux/slab.h>
85 #include <linux/init.h>
86 #include <linux/blkdev.h>
87 #include <linux/delay.h>
88 #include <linux/interrupt.h>
89 #include <linux/device.h>
90 #include <scsi/scsi_host.h>
91 #include <scsi/scsi_cmnd.h>
92 #include <linux/libata.h>
93 #include "sata_promise.h"
94
95 #define DRV_NAME "sata_sx4"
96 #define DRV_VERSION "0.12"
97
98
99 enum {
100 PDC_MMIO_BAR = 3,
101 PDC_DIMM_BAR = 4,
102
103 PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */
104
105 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
106 PDC_HDMA_PKT_SUBMIT = 0x100, /* Host DMA packet pointer addr */
107 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
108 PDC_HDMA_CTLSTAT = 0x12C, /* Host DMA control / status */
109
110 PDC_CTLSTAT = 0x60, /* IDEn control / status */
111
112 PDC_20621_SEQCTL = 0x400,
113 PDC_20621_SEQMASK = 0x480,
114 PDC_20621_GENERAL_CTL = 0x484,
115 PDC_20621_PAGE_SIZE = (32 * 1024),
116
117 /* chosen, not constant, values; we design our own DIMM mem map */
118 PDC_20621_DIMM_WINDOW = 0x0C, /* page# for 32K DIMM window */
119 PDC_20621_DIMM_BASE = 0x00200000,
120 PDC_20621_DIMM_DATA = (64 * 1024),
121 PDC_DIMM_DATA_STEP = (256 * 1024),
122 PDC_DIMM_WINDOW_STEP = (8 * 1024),
123 PDC_DIMM_HOST_PRD = (6 * 1024),
124 PDC_DIMM_HOST_PKT = (128 * 0),
125 PDC_DIMM_HPKT_PRD = (128 * 1),
126 PDC_DIMM_ATA_PKT = (128 * 2),
127 PDC_DIMM_APKT_PRD = (128 * 3),
128 PDC_DIMM_HEADER_SZ = PDC_DIMM_APKT_PRD + 128,
129 PDC_PAGE_WINDOW = 0x40,
130 PDC_PAGE_DATA = PDC_PAGE_WINDOW +
131 (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
132 PDC_PAGE_SET = PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,
133
134 PDC_CHIP0_OFS = 0xC0000, /* offset of chip #0 */
135
136 PDC_20621_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
137 (1<<23),
138
139 board_20621 = 0, /* FastTrak S150 SX4 */
140
141 PDC_MASK_INT = (1 << 10), /* HDMA/ATA mask int */
142 PDC_RESET = (1 << 11), /* HDMA/ATA reset */
143 PDC_DMA_ENABLE = (1 << 7), /* DMA start/stop */
144
145 PDC_MAX_HDMA = 32,
146 PDC_HDMA_Q_MASK = (PDC_MAX_HDMA - 1),
147
148 PDC_DIMM0_SPD_DEV_ADDRESS = 0x50,
149 PDC_DIMM1_SPD_DEV_ADDRESS = 0x51,
150 PDC_I2C_CONTROL = 0x48,
151 PDC_I2C_ADDR_DATA = 0x4C,
152 PDC_DIMM0_CONTROL = 0x80,
153 PDC_DIMM1_CONTROL = 0x84,
154 PDC_SDRAM_CONTROL = 0x88,
155 PDC_I2C_WRITE = 0, /* master -> slave */
156 PDC_I2C_READ = (1 << 6), /* master <- slave */
157 PDC_I2C_START = (1 << 7), /* start I2C proto */
158 PDC_I2C_MASK_INT = (1 << 5), /* mask I2C interrupt */
159 PDC_I2C_COMPLETE = (1 << 16), /* I2C normal compl. */
160 PDC_I2C_NO_ACK = (1 << 20), /* slave no-ack addr */
161 PDC_DIMM_SPD_SUBADDRESS_START = 0x00,
162 PDC_DIMM_SPD_SUBADDRESS_END = 0x7F,
163 PDC_DIMM_SPD_ROW_NUM = 3,
164 PDC_DIMM_SPD_COLUMN_NUM = 4,
165 PDC_DIMM_SPD_MODULE_ROW = 5,
166 PDC_DIMM_SPD_TYPE = 11,
167 PDC_DIMM_SPD_FRESH_RATE = 12,
168 PDC_DIMM_SPD_BANK_NUM = 17,
169 PDC_DIMM_SPD_CAS_LATENCY = 18,
170 PDC_DIMM_SPD_ATTRIBUTE = 21,
171 PDC_DIMM_SPD_ROW_PRE_CHARGE = 27,
172 PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28,
173 PDC_DIMM_SPD_RAS_CAS_DELAY = 29,
174 PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30,
175 PDC_DIMM_SPD_SYSTEM_FREQ = 126,
176 PDC_CTL_STATUS = 0x08,
177 PDC_DIMM_WINDOW_CTLR = 0x0C,
178 PDC_TIME_CONTROL = 0x3C,
179 PDC_TIME_PERIOD = 0x40,
180 PDC_TIME_COUNTER = 0x44,
181 PDC_GENERAL_CTLR = 0x484,
182 PCI_PLL_INIT = 0x8A531824,
183 PCI_X_TCOUNT = 0xEE1E5CFF,
184
185 /* PDC_TIME_CONTROL bits */
186 PDC_TIMER_BUZZER = (1 << 10),
187 PDC_TIMER_MODE_PERIODIC = 0, /* bits 9:8 == 00 */
188 PDC_TIMER_MODE_ONCE = (1 << 8), /* bits 9:8 == 01 */
189 PDC_TIMER_ENABLE = (1 << 7),
190 PDC_TIMER_MASK_INT = (1 << 5),
191 PDC_TIMER_SEQ_MASK = 0x1f, /* SEQ ID for timer */
192 PDC_TIMER_DEFAULT = PDC_TIMER_MODE_ONCE |
193 PDC_TIMER_ENABLE |
194 PDC_TIMER_MASK_INT,
195 };
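
/*
 * Per-port DIMM layout implied by the constants above: each port owns an
 * 8K control window at PDC_20621_DIMM_BASE + port * PDC_DIMM_WINDOW_STEP
 * holding the Host DMA packet (offset 0), HDMA S/G entry (128), ATA packet
 * (256), ATA S/G entry (384) and host PRD table (6K), plus a data buffer at
 * PDC_20621_DIMM_BASE + 64K + port * 256K.  For example, port 2's ATA packet
 * lives at 0x00200000 + 2 * 0x2000 + 0x100 = 0x204100 and its data buffer
 * sits at 0x00200000 + 0x10000 + 2 * 0x40000 = 0x290000.
 */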
196
197 #define ECC_ERASE_BUF_SZ (128 * 1024)
198
199 struct pdc_port_priv {
200 u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
201 u8 *pkt;
202 dma_addr_t pkt_dma;
203 };
204
205 struct pdc_host_priv {
206 unsigned int doing_hdma;
207 unsigned int hdma_prod;
208 unsigned int hdma_cons;
209 struct {
210 struct ata_queued_cmd *qc;
211 unsigned int seq;
212 unsigned long pkt_ofs;
213 } hdma[32];
214 };
215
216
217 static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
218 static void pdc_error_handler(struct ata_port *ap);
219 static void pdc_freeze(struct ata_port *ap);
220 static void pdc_thaw(struct ata_port *ap);
221 static int pdc_port_start(struct ata_port *ap);
222 static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
223 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
224 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
225 static unsigned int pdc20621_dimm_init(struct ata_host *host);
226 static int pdc20621_detect_dimm(struct ata_host *host);
227 static unsigned int pdc20621_i2c_read(struct ata_host *host,
228 u32 device, u32 subaddr, u32 *pdata);
229 static int pdc20621_prog_dimm0(struct ata_host *host);
230 static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
231 #ifdef ATA_VERBOSE_DEBUG
232 static void pdc20621_get_from_dimm(struct ata_host *host,
233 void *psource, u32 offset, u32 size);
234 #endif
235 static void pdc20621_put_to_dimm(struct ata_host *host,
236 void *psource, u32 offset, u32 size);
237 static void pdc20621_irq_clear(struct ata_port *ap);
238 static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc);
239 static int pdc_softreset(struct ata_link *link, unsigned int *class,
240 unsigned long deadline);
241 static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
242 static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
243
244
245 static struct scsi_host_template pdc_sata_sht = {
246 ATA_BASE_SHT(DRV_NAME),
247 .sg_tablesize = LIBATA_MAX_PRD,
248 .dma_boundary = ATA_DMA_BOUNDARY,
249 };
250
251 /* TODO: inherit from base port_ops after converting to new EH */
252 static struct ata_port_operations pdc_20621_ops = {
253 .inherits = &ata_sff_port_ops,
254
255 .check_atapi_dma = pdc_check_atapi_dma,
256 .qc_prep = pdc20621_qc_prep,
257 .qc_issue = pdc20621_qc_issue,
258
259 .freeze = pdc_freeze,
260 .thaw = pdc_thaw,
261 .softreset = pdc_softreset,
262 .error_handler = pdc_error_handler,
263 .lost_interrupt = ATA_OP_NULL,
264 .post_internal_cmd = pdc_post_internal_cmd,
265
266 .port_start = pdc_port_start,
267
268 .sff_tf_load = pdc_tf_load_mmio,
269 .sff_exec_command = pdc_exec_command_mmio,
270 .sff_irq_clear = pdc20621_irq_clear,
271 };
272
273 static const struct ata_port_info pdc_port_info[] = {
274 /* board_20621 */
275 {
276 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_ATAPI |
277 ATA_FLAG_PIO_POLLING,
278 .pio_mask = ATA_PIO4,
279 .mwdma_mask = ATA_MWDMA2,
280 .udma_mask = ATA_UDMA6,
281 .port_ops = &pdc_20621_ops,
282 },
283
284 };
285
286 static const struct pci_device_id pdc_sata_pci_tbl[] = {
287 { PCI_VDEVICE(PROMISE, 0x6622), board_20621 },
288
289 { } /* terminate list */
290 };
291
292 static struct pci_driver pdc_sata_pci_driver = {
293 .name = DRV_NAME,
294 .id_table = pdc_sata_pci_tbl,
295 .probe = pdc_sata_init_one,
296 .remove = ata_pci_remove_one,
297 };
298
299
300 static int pdc_port_start(struct ata_port *ap)
301 {
302 struct device *dev = ap->host->dev;
303 struct pdc_port_priv *pp;
304
305 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
306 if (!pp)
307 return -ENOMEM;
308
309 pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
310 if (!pp->pkt)
311 return -ENOMEM;
312
313 ap->private_data = pp;
314
315 return 0;
316 }
317
318 static inline void pdc20621_ata_sg(u8 *buf, unsigned int portno,
319 unsigned int total_len)
320 {
321 u32 addr;
322 unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
323 __le32 *buf32 = (__le32 *) buf;
324
325 /* output ATA packet S/G table */
326 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
327 (PDC_DIMM_DATA_STEP * portno);
328 VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr);
329 buf32[dw] = cpu_to_le32(addr);
330 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
331
332 VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n",
333 PDC_20621_DIMM_BASE +
334 (PDC_DIMM_WINDOW_STEP * portno) +
335 PDC_DIMM_APKT_PRD,
336 buf32[dw], buf32[dw + 1]);
337 }
338
339 static inline void pdc20621_host_sg(u8 *buf, unsigned int portno,
340 unsigned int total_len)
341 {
342 u32 addr;
343 unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
344 __le32 *buf32 = (__le32 *) buf;
345
346 /* output Host DMA packet S/G table */
347 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
348 (PDC_DIMM_DATA_STEP * portno);
349
350 buf32[dw] = cpu_to_le32(addr);
351 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
352
353 VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n",
354 PDC_20621_DIMM_BASE +
355 (PDC_DIMM_WINDOW_STEP * portno) +
356 PDC_DIMM_HPKT_PRD,
357 buf32[dw], buf32[dw + 1]);
358 }
359
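/*
 * pdc20621_ata_pkt() below builds the ATA command packet image in the
 * port's dimm_buf at PDC_DIMM_ATA_PKT: a flags byte (PDC_PKT_READ or
 * PDC_PKT_NODATA), a reserved byte, the sequence id (port + 1) and the
 * delay sequence id (0xff), then the DIMM S/G address and next-packet
 * dwords, followed by <register-select, value> byte pairs for the device
 * select and device control registers.  The taskfile registers themselves
 * are appended later via pdc_prep_lba28/48() and pdc_pkt_footer() from
 * sata_promise.h.
 */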
360 static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
361 unsigned int devno, u8 *buf,
362 unsigned int portno)
363 {
364 unsigned int i, dw;
365 __le32 *buf32 = (__le32 *) buf;
366 u8 dev_reg;
367
368 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
369 (PDC_DIMM_WINDOW_STEP * portno) +
370 PDC_DIMM_APKT_PRD;
371 VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
372
373 i = PDC_DIMM_ATA_PKT;
374
375 /*
376 * Set up ATA packet
377 */
378 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
379 buf[i++] = PDC_PKT_READ;
380 else if (tf->protocol == ATA_PROT_NODATA)
381 buf[i++] = PDC_PKT_NODATA;
382 else
383 buf[i++] = 0;
384 buf[i++] = 0; /* reserved */
385 buf[i++] = portno + 1; /* seq. id */
386 buf[i++] = 0xff; /* delay seq. id */
387
388 /* dimm dma S/G, and next-pkt */
389 dw = i >> 2;
390 if (tf->protocol == ATA_PROT_NODATA)
391 buf32[dw] = 0;
392 else
393 buf32[dw] = cpu_to_le32(dimm_sg);
394 buf32[dw + 1] = 0;
395 i += 8;
396
397 if (devno == 0)
398 dev_reg = ATA_DEVICE_OBS;
399 else
400 dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
401
402 /* select device */
403 buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
404 buf[i++] = dev_reg;
405
406 /* device control register */
407 buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
408 buf[i++] = tf->ctl;
409
410 return i;
411 }
412
413 static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
414 unsigned int portno)
415 {
416 unsigned int dw;
417 u32 tmp;
418 __le32 *buf32 = (__le32 *) buf;
419
420 unsigned int host_sg = PDC_20621_DIMM_BASE +
421 (PDC_DIMM_WINDOW_STEP * portno) +
422 PDC_DIMM_HOST_PRD;
423 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
424 (PDC_DIMM_WINDOW_STEP * portno) +
425 PDC_DIMM_HPKT_PRD;
426 VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
427 VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg);
428
429 dw = PDC_DIMM_HOST_PKT >> 2;
430
431 /*
432 * Set up Host DMA packet
433 */
434 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
435 tmp = PDC_PKT_READ;
436 else
437 tmp = 0;
438 tmp |= ((portno + 1 + 4) << 16); /* seq. id */
439 tmp |= (0xff << 24); /* delay seq. id */
440 buf32[dw + 0] = cpu_to_le32(tmp);
441 buf32[dw + 1] = cpu_to_le32(host_sg);
442 buf32[dw + 2] = cpu_to_le32(dimm_sg);
443 buf32[dw + 3] = 0;
444
445 VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n",
446 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) +
447 PDC_DIMM_HOST_PKT,
448 buf32[dw + 0],
449 buf32[dw + 1],
450 buf32[dw + 2],
451 buf32[dw + 3]);
452 }
453
454 static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
455 {
456 struct scatterlist *sg;
457 struct ata_port *ap = qc->ap;
458 struct pdc_port_priv *pp = ap->private_data;
459 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
460 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
461 unsigned int portno = ap->port_no;
462 unsigned int i, si, idx, total_len = 0, sgt_len;
463 __le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
464
465 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
466
467 VPRINTK("ata%u: ENTER\n", ap->print_id);
468
469 /* hard-code chip #0 */
470 mmio += PDC_CHIP0_OFS;
471
472 /*
473 * Build S/G table
474 */
475 idx = 0;
476 for_each_sg(qc->sg, sg, qc->n_elem, si) {
477 buf[idx++] = cpu_to_le32(sg_dma_address(sg));
478 buf[idx++] = cpu_to_le32(sg_dma_len(sg));
479 total_len += sg_dma_len(sg);
480 }
481 buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
482 sgt_len = idx * 4;
483
484 /*
485 * Build ATA, host DMA packets
486 */
487 pdc20621_host_sg(&pp->dimm_buf[0], portno, total_len);
488 pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);
489
490 pdc20621_ata_sg(&pp->dimm_buf[0], portno, total_len);
491 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
492
493 if (qc->tf.flags & ATA_TFLAG_LBA48)
494 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
495 else
496 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
497
498 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
499
500 /* copy three S/G tables and two packets to DIMM MMIO window */
501 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
502 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
503 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
504 PDC_DIMM_HOST_PRD,
505 &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);
506
507 /* force host FIFO dump */
508 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
509
510 readl(dimm_mmio); /* MMIO PCI posting flush */
511
512 VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len);
513 }
514
515 static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
516 {
517 struct ata_port *ap = qc->ap;
518 struct pdc_port_priv *pp = ap->private_data;
519 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
520 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
521 unsigned int portno = ap->port_no;
522 unsigned int i;
523
524 VPRINTK("ata%u: ENTER\n", ap->print_id);
525
526 /* hard-code chip #0 */
527 mmio += PDC_CHIP0_OFS;
528
529 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
530
531 if (qc->tf.flags & ATA_TFLAG_LBA48)
532 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
533 else
534 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
535
536 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
537
538 /* copy three S/G tables and two packets to DIMM MMIO window */
539 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
540 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
541
542 /* force host FIFO dump */
543 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
544
545 readl(dimm_mmio); /* MMIO PCI posting flush */
546
547 VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
548 }
549
550 static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
551 {
552 switch (qc->tf.protocol) {
553 case ATA_PROT_DMA:
554 pdc20621_dma_prep(qc);
555 break;
556 case ATA_PROT_NODATA:
557 pdc20621_nodata_prep(qc);
558 break;
559 default:
560 break;
561 }
562 }
563
564 static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
565 unsigned int seq,
566 u32 pkt_ofs)
567 {
568 struct ata_port *ap = qc->ap;
569 struct ata_host *host = ap->host;
570 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
571
572 /* hard-code chip #0 */
573 mmio += PDC_CHIP0_OFS;
574
575 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
576 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
577
578 writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
579 readl(mmio + PDC_HDMA_PKT_SUBMIT); /* flush */
580 }
581
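/*
 * The chip has a single Host DMA engine, so HDMA packet submissions are
 * serialised through a small software queue: when a transfer is already in
 * flight (doing_hdma), the (qc, seq, pkt_ofs) triple is stashed in hdma[]
 * and replayed by pdc20621_pop_hdma() once the in-flight transfer's
 * interrupt arrives.  hdma_prod/hdma_cons index the 32-entry ring, masked
 * with PDC_HDMA_Q_MASK.
 */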
582 static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
583 unsigned int seq,
584 u32 pkt_ofs)
585 {
586 struct ata_port *ap = qc->ap;
587 struct pdc_host_priv *pp = ap->host->private_data;
588 unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;
589
590 if (!pp->doing_hdma) {
591 __pdc20621_push_hdma(qc, seq, pkt_ofs);
592 pp->doing_hdma = 1;
593 return;
594 }
595
596 pp->hdma[idx].qc = qc;
597 pp->hdma[idx].seq = seq;
598 pp->hdma[idx].pkt_ofs = pkt_ofs;
599 pp->hdma_prod++;
600 }
601
602 static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
603 {
604 struct ata_port *ap = qc->ap;
605 struct pdc_host_priv *pp = ap->host->private_data;
606 unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;
607
608 /* if nothing on queue, we're done */
609 if (pp->hdma_prod == pp->hdma_cons) {
610 pp->doing_hdma = 0;
611 return;
612 }
613
614 __pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
615 pp->hdma[idx].pkt_ofs);
616 pp->hdma_cons++;
617 }
618
619 #ifdef ATA_VERBOSE_DEBUG
620 static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
621 {
622 struct ata_port *ap = qc->ap;
623 unsigned int port_no = ap->port_no;
624 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
625
626 dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
627 dimm_mmio += PDC_DIMM_HOST_PKT;
628
629 printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio));
630 printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4));
631 printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8));
632 printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12));
633 }
634 #else
635 static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
636 #endif /* ATA_VERBOSE_DEBUG */
637
638 static void pdc20621_packet_start(struct ata_queued_cmd *qc)
639 {
640 struct ata_port *ap = qc->ap;
641 struct ata_host *host = ap->host;
642 unsigned int port_no = ap->port_no;
643 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
644 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
645 u8 seq = (u8) (port_no + 1);
646 unsigned int port_ofs;
647
648 /* hard-code chip #0 */
649 mmio += PDC_CHIP0_OFS;
650
651 VPRINTK("ata%u: ENTER\n", ap->print_id);
652
653 wmb(); /* flush PRD, pkt writes */
654
655 port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
656
657 /* if writing, we (1) DMA to DIMM, then (2) do ATA command */
658 if (rw && qc->tf.protocol == ATA_PROT_DMA) {
659 seq += 4;
660
661 pdc20621_dump_hdma(qc);
662 pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
663 VPRINTK("queued ofs 0x%x (%u), seq %u\n",
664 port_ofs + PDC_DIMM_HOST_PKT,
665 port_ofs + PDC_DIMM_HOST_PKT,
666 seq);
667 } else {
668 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
669 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
670
671 writel(port_ofs + PDC_DIMM_ATA_PKT,
672 ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
673 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
674 VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
675 port_ofs + PDC_DIMM_ATA_PKT,
676 port_ofs + PDC_DIMM_ATA_PKT,
677 seq);
678 }
679 }
680
681 static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc)
682 {
683 switch (qc->tf.protocol) {
684 case ATA_PROT_NODATA:
685 if (qc->tf.flags & ATA_TFLAG_POLLING)
686 break;
687 /*FALLTHROUGH*/
688 case ATA_PROT_DMA:
689 pdc20621_packet_start(qc);
690 return 0;
691
692 case ATAPI_PROT_DMA:
693 BUG();
694 break;
695
696 default:
697 break;
698 }
699
700 return ata_sff_qc_issue(qc);
701 }
702
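/*
 * Every DMA command completes in two interrupts, per the theory of
 * operation above.  For a write, the first (HDMA) interrupt means the data
 * has been copied into DIMM, so the ATA packet is submitted; for a read,
 * the first (ATA) interrupt means the drive has filled the DIMM buffer, so
 * the HDMA packet is pushed to copy it back to host memory.  The second
 * interrupt in either direction completes the qc and kicks any queued HDMA
 * packet via pdc20621_pop_hdma().
 */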
703 static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
704 struct ata_queued_cmd *qc,
705 unsigned int doing_hdma,
706 void __iomem *mmio)
707 {
708 unsigned int port_no = ap->port_no;
709 unsigned int port_ofs =
710 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
711 u8 status;
712 unsigned int handled = 0;
713
714 VPRINTK("ENTER\n");
715
716 if ((qc->tf.protocol == ATA_PROT_DMA) && /* read */
717 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
718
719 /* step two - DMA from DIMM to host */
720 if (doing_hdma) {
721 VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id,
722 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
723 /* get drive status; clear intr; complete txn */
724 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
725 ata_qc_complete(qc);
726 pdc20621_pop_hdma(qc);
727 }
728
729 /* step one - exec ATA command */
730 else {
731 u8 seq = (u8) (port_no + 1 + 4);
732 VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id,
733 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
734
735 /* submit hdma pkt */
736 pdc20621_dump_hdma(qc);
737 pdc20621_push_hdma(qc, seq,
738 port_ofs + PDC_DIMM_HOST_PKT);
739 }
740 handled = 1;
741
742 } else if (qc->tf.protocol == ATA_PROT_DMA) { /* write */
743
744 /* step one - DMA from host to DIMM */
745 if (doing_hdma) {
746 u8 seq = (u8) (port_no + 1);
747 VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id,
748 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
749
750 /* submit ata pkt */
751 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
752 readl(mmio + PDC_20621_SEQCTL + (seq * 4));
753 writel(port_ofs + PDC_DIMM_ATA_PKT,
754 ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
755 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
756 }
757
758 /* step two - execute ATA command */
759 else {
760 VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id,
761 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
762 /* get drive status; clear intr; complete txn */
763 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
764 ata_qc_complete(qc);
765 pdc20621_pop_hdma(qc);
766 }
767 handled = 1;
768
769 /* command completion, but no data xfer */
770 } else if (qc->tf.protocol == ATA_PROT_NODATA) {
771
772 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
773 DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
774 qc->err_mask |= ac_err_mask(status);
775 ata_qc_complete(qc);
776 handled = 1;
777
778 } else {
779 ap->stats.idle_irq++;
780 }
781
782 return handled;
783 }
784
785 static void pdc20621_irq_clear(struct ata_port *ap)
786 {
787 ioread8(ap->ioaddr.status_addr);
788 }
789
790 static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
791 {
792 struct ata_host *host = dev_instance;
793 struct ata_port *ap;
794 u32 mask = 0;
795 unsigned int i, tmp, port_no;
796 unsigned int handled = 0;
797 void __iomem *mmio_base;
798
799 VPRINTK("ENTER\n");
800
801 if (!host || !host->iomap[PDC_MMIO_BAR]) {
802 VPRINTK("QUICK EXIT\n");
803 return IRQ_NONE;
804 }
805
806 mmio_base = host->iomap[PDC_MMIO_BAR];
807
808 /* reading should also clear interrupts */
809 mmio_base += PDC_CHIP0_OFS;
810 mask = readl(mmio_base + PDC_20621_SEQMASK);
811 VPRINTK("mask == 0x%x\n", mask);
812
813 if (mask == 0xffffffff) {
814 VPRINTK("QUICK EXIT 2\n");
815 return IRQ_NONE;
816 }
817 mask &= 0xffff; /* only 16 tags possible */
818 if (!mask) {
819 VPRINTK("QUICK EXIT 3\n");
820 return IRQ_NONE;
821 }
822
823 spin_lock(&host->lock);
824
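/*
 * SEQ ids 1-4 are the per-port ATA engine completions and 5-8 the HDMA
 * completions for the same ports (the packets use seq = port + 1 and
 * seq = port + 1 + 4 respectively), so i > 4 is passed to
 * pdc20621_host_intr() as the doing_hdma flag.
 */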
825 for (i = 1; i < 9; i++) {
826 port_no = i - 1;
827 if (port_no > 3)
828 port_no -= 4;
829 if (port_no >= host->n_ports)
830 ap = NULL;
831 else
832 ap = host->ports[port_no];
833 tmp = mask & (1 << i);
834 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
835 if (tmp && ap) {
836 struct ata_queued_cmd *qc;
837
838 qc = ata_qc_from_tag(ap, ap->link.active_tag);
839 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
840 handled += pdc20621_host_intr(ap, qc, (i > 4),
841 mmio_base);
842 }
843 }
844
845 spin_unlock(&host->lock);
846
847 VPRINTK("mask == 0x%x\n", mask);
848
849 VPRINTK("EXIT\n");
850
851 return IRQ_RETVAL(handled);
852 }
853
854 static void pdc_freeze(struct ata_port *ap)
855 {
856 void __iomem *mmio = ap->ioaddr.cmd_addr;
857 u32 tmp;
858
859 /* FIXME: if all 4 ATA engines are stopped, also stop HDMA engine */
860
861 tmp = readl(mmio + PDC_CTLSTAT);
862 tmp |= PDC_MASK_INT;
863 tmp &= ~PDC_DMA_ENABLE;
864 writel(tmp, mmio + PDC_CTLSTAT);
865 readl(mmio + PDC_CTLSTAT); /* flush */
866 }
867
868 static void pdc_thaw(struct ata_port *ap)
869 {
870 void __iomem *mmio = ap->ioaddr.cmd_addr;
871 u32 tmp;
872
873 /* FIXME: start HDMA engine, if zero ATA engines running */
874
875 /* clear IRQ */
876 ioread8(ap->ioaddr.status_addr);
877
878 /* turn IRQ back on */
879 tmp = readl(mmio + PDC_CTLSTAT);
880 tmp &= ~PDC_MASK_INT;
881 writel(tmp, mmio + PDC_CTLSTAT);
882 readl(mmio + PDC_CTLSTAT); /* flush */
883 }
884
885 static void pdc_reset_port(struct ata_port *ap)
886 {
887 void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
888 unsigned int i;
889 u32 tmp;
890
891 /* FIXME: handle HDMA copy engine */
892
893 for (i = 11; i > 0; i--) {
894 tmp = readl(mmio);
895 if (tmp & PDC_RESET)
896 break;
897
898 udelay(100);
899
900 tmp |= PDC_RESET;
901 writel(tmp, mmio);
902 }
903
904 tmp &= ~PDC_RESET;
905 writel(tmp, mmio);
906 readl(mmio); /* flush */
907 }
908
909 static int pdc_softreset(struct ata_link *link, unsigned int *class,
910 unsigned long deadline)
911 {
912 pdc_reset_port(link->ap);
913 return ata_sff_softreset(link, class, deadline);
914 }
915
916 static void pdc_error_handler(struct ata_port *ap)
917 {
918 if (!(ap->pflags & ATA_PFLAG_FROZEN))
919 pdc_reset_port(ap);
920
921 ata_sff_error_handler(ap);
922 }
923
924 static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
925 {
926 struct ata_port *ap = qc->ap;
927
928 /* make DMA engine forget about the failed command */
929 if (qc->flags & ATA_QCFLAG_FAILED)
930 pdc_reset_port(ap);
931 }
932
933 static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
934 {
935 u8 *scsicmd = qc->scsicmd->cmnd;
936 int pio = 1; /* atapi dma off by default */
937
938 /* Whitelist commands that may use DMA. */
939 switch (scsicmd[0]) {
940 case WRITE_12:
941 case WRITE_10:
942 case WRITE_6:
943 case READ_12:
944 case READ_10:
945 case READ_6:
946 case 0xad: /* READ_DVD_STRUCTURE */
947 case 0xbe: /* READ_CD */
948 pio = 0;
949 }
950 /* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
951 if (scsicmd[0] == WRITE_10) {
952 unsigned int lba =
953 (scsicmd[2] << 24) |
954 (scsicmd[3] << 16) |
955 (scsicmd[4] << 8) |
956 scsicmd[5];
957 if (lba >= 0xFFFF4FA2)
958 pio = 1;
959 }
960 return pio;
961 }
962
963 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
964 {
965 WARN_ON(tf->protocol == ATA_PROT_DMA ||
966 tf->protocol == ATAPI_PROT_DMA);
967 ata_sff_tf_load(ap, tf);
968 }
969
970
971 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
972 {
973 WARN_ON(tf->protocol == ATA_PROT_DMA ||
974 tf->protocol == ATAPI_PROT_DMA);
975 ata_sff_exec_command(ap, tf);
976 }
977
978
979 static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
980 {
981 port->cmd_addr = base;
982 port->data_addr = base;
983 port->feature_addr =
984 port->error_addr = base + 0x4;
985 port->nsect_addr = base + 0x8;
986 port->lbal_addr = base + 0xc;
987 port->lbam_addr = base + 0x10;
988 port->lbah_addr = base + 0x14;
989 port->device_addr = base + 0x18;
990 port->command_addr =
991 port->status_addr = base + 0x1c;
992 port->altstatus_addr =
993 port->ctl_addr = base + 0x38;
994 }
995
996
997 #ifdef ATA_VERBOSE_DEBUG
998 static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
999 u32 offset, u32 size)
1000 {
1001 u32 window_size;
1002 u16 idx;
1003 u8 page_mask;
1004 long dist;
1005 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1006 void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
1007
1008 /* hard-code chip #0 */
1009 mmio += PDC_CHIP0_OFS;
1010
1011 page_mask = 0x00;
1012 window_size = 0x2000 * 4; /* 32K byte uchar size */
1013 idx = (u16) (offset / window_size);
1014
1015 writel(0x01, mmio + PDC_GENERAL_CTLR);
1016 readl(mmio + PDC_GENERAL_CTLR);
1017 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1018 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1019
1020 offset -= (idx * window_size);
1021 idx++;
1022 dist = ((long) (window_size - (offset + size))) >= 0 ? size :
1023 (long) (window_size - offset);
1024 memcpy_fromio((char *) psource, (char *) (dimm_mmio + offset / 4),
1025 dist);
1026
1027 psource += dist;
1028 size -= dist;
1029 for (; (long) size >= (long) window_size ;) {
1030 writel(0x01, mmio + PDC_GENERAL_CTLR);
1031 readl(mmio + PDC_GENERAL_CTLR);
1032 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1033 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1034 memcpy_fromio((char *) psource, (char *) (dimm_mmio),
1035 window_size / 4);
1036 psource += window_size;
1037 size -= window_size;
1038 idx++;
1039 }
1040
1041 if (size) {
1042 writel(0x01, mmio + PDC_GENERAL_CTLR);
1043 readl(mmio + PDC_GENERAL_CTLR);
1044 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1045 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1046 memcpy_fromio((char *) psource, (char *) (dimm_mmio),
1047 size / 4);
1048 }
1049 }
1050 #endif
1051
1052
1053 static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
1054 u32 offset, u32 size)
1055 {
1056 u32 window_size;
1057 u16 idx;
1058 u8 page_mask;
1059 long dist;
1060 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1061 void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
1062
1063 /* hard-code chip #0 */
1064 mmio += PDC_CHIP0_OFS;
1065
1066 page_mask = 0x00;
1067 window_size = 0x2000 * 4; /* 32K byte uchar size */
1068 idx = (u16) (offset / window_size);
1069
1070 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1071 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1072 offset -= (idx * window_size);
1073 idx++;
1074 dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
1075 (long) (window_size - offset);
1076 memcpy_toio(dimm_mmio + offset / 4, psource, dist);
1077 writel(0x01, mmio + PDC_GENERAL_CTLR);
1078 readl(mmio + PDC_GENERAL_CTLR);
1079
1080 psource += dist;
1081 size -= dist;
1082 for (; (long) size >= (long) window_size ;) {
1083 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1084 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1085 memcpy_toio(dimm_mmio, psource, window_size / 4);
1086 writel(0x01, mmio + PDC_GENERAL_CTLR);
1087 readl(mmio + PDC_GENERAL_CTLR);
1088 psource += window_size;
1089 size -= window_size;
1090 idx++;
1091 }
1092
1093 if (size) {
1094 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1095 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1096 memcpy_toio(dimm_mmio, psource, size / 4);
1097 writel(0x01, mmio + PDC_GENERAL_CTLR);
1098 readl(mmio + PDC_GENERAL_CTLR);
1099 }
1100 }
1101
1102
1103 static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
1104 u32 subaddr, u32 *pdata)
1105 {
1106 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1107 u32 i2creg = 0;
1108 u32 status;
1109 u32 count = 0;
1110
1111 /* hard-code chip #0 */
1112 mmio += PDC_CHIP0_OFS;
1113
1114 i2creg |= device << 24;
1115 i2creg |= subaddr << 16;
1116
1117 /* Set the device and subaddress */
1118 writel(i2creg, mmio + PDC_I2C_ADDR_DATA);
1119 readl(mmio + PDC_I2C_ADDR_DATA);
1120
1121 /* Write Control to perform read operation, mask int */
1122 writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
1123 mmio + PDC_I2C_CONTROL);
1124
1125 for (count = 0; count <= 1000; count ++) {
1126 status = readl(mmio + PDC_I2C_CONTROL);
1127 if (status & PDC_I2C_COMPLETE) {
1128 status = readl(mmio + PDC_I2C_ADDR_DATA);
1129 break;
1130 } else if (count == 1000)
1131 return 0;
1132 }
1133
1134 *pdata = (status >> 8) & 0x000000ff;
1135 return 1;
1136 }
1137
1138
1139 static int pdc20621_detect_dimm(struct ata_host *host)
1140 {
1141 u32 data = 0;
1142 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1143 PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
1144 if (data == 100)
1145 return 100;
1146 } else
1147 return 0;
1148
1149 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
1150 if (data <= 0x75)
1151 return 133;
1152 } else
1153 return 0;
1154
1155 return 0;
1156 }
1157
1158
1159 static int pdc20621_prog_dimm0(struct ata_host *host)
1160 {
1161 u32 spd0[50];
1162 u32 data = 0;
1163 int size, i;
1164 u8 bdimmsize;
1165 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1166 static const struct {
1167 unsigned int reg;
1168 unsigned int ofs;
1169 } pdc_i2c_read_data [] = {
1170 { PDC_DIMM_SPD_TYPE, 11 },
1171 { PDC_DIMM_SPD_FRESH_RATE, 12 },
1172 { PDC_DIMM_SPD_COLUMN_NUM, 4 },
1173 { PDC_DIMM_SPD_ATTRIBUTE, 21 },
1174 { PDC_DIMM_SPD_ROW_NUM, 3 },
1175 { PDC_DIMM_SPD_BANK_NUM, 17 },
1176 { PDC_DIMM_SPD_MODULE_ROW, 5 },
1177 { PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
1178 { PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
1179 { PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
1180 { PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
1181 { PDC_DIMM_SPD_CAS_LATENCY, 18 },
1182 };
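
/*
 * spd0[] is indexed by SPD byte number (the ofs column above mirrors the
 * SPD byte being read), so the expressions below pack the row/column/bank
 * geometry, timing parameters and CAS latency straight from the raw SPD
 * bytes into the DIMM0 module control register value.
 */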
1183
1184 /* hard-code chip #0 */
1185 mmio += PDC_CHIP0_OFS;
1186
1187 for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
1188 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1189 pdc_i2c_read_data[i].reg,
1190 &spd0[pdc_i2c_read_data[i].ofs]);
1191
1192 data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
1193 data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
1194 ((((spd0[27] + 9) / 10) - 1) << 8) ;
1195 data |= (((((spd0[29] > spd0[28])
1196 ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
1197 data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
1198
1199 if (spd0[18] & 0x08)
1200 data |= ((0x03) << 14);
1201 else if (spd0[18] & 0x04)
1202 data |= ((0x02) << 14);
1203 else if (spd0[18] & 0x01)
1204 data |= ((0x01) << 14);
1205 else
1206 data |= (0 << 14);
1207
1208 /*
1209 Calculate the DIMM size from the SPD geometry (bdimmsize is the
1210 log2 of the byte size) and encode it into the DIMM0 control register.
1211 */
1212
1213 bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
1214 size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */
1215 data |= (((size / 16) - 1) << 16);
1216 data |= (0 << 23);
1217 data |= 8;
1218 writel(data, mmio + PDC_DIMM0_CONTROL);
1219 readl(mmio + PDC_DIMM0_CONTROL);
1220 return size;
1221 }
1222
1223
1224 static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
1225 {
1226 u32 data, spd0;
1227 int error, i;
1228 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1229
1230 /* hard-code chip #0 */
1231 mmio += PDC_CHIP0_OFS;
1232
1233 /*
1234 Set to default: DIMM Module Global Control Register (0x022259F1)
1235 DIMM Arbitration Disable (bit 20)
1236 DIMM Data/Control Output Driving Selection (bits 12-15)
1237 Refresh Enable (bit 17)
1238 */
1239
1240 data = 0x022259F1;
1241 writel(data, mmio + PDC_SDRAM_CONTROL);
1242 readl(mmio + PDC_SDRAM_CONTROL);
1243
1244 /* Turn on for ECC */
1245 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1246 PDC_DIMM_SPD_TYPE, &spd0);
1247 if (spd0 == 0x02) {
1248 data |= (0x01 << 16);
1249 writel(data, mmio + PDC_SDRAM_CONTROL);
1250 readl(mmio + PDC_SDRAM_CONTROL);
1251 printk(KERN_ERR "Local DIMM ECC Enabled\n");
1252 }
1253
1254 /* DIMM Initialization Select/Enable (bit 18/19) */
1255 data &= (~(1<<18));
1256 data |= (1<<19);
1257 writel(data, mmio + PDC_SDRAM_CONTROL);
1258
1259 error = 1;
1260 for (i = 1; i <= 10; i++) { /* polling ~5 secs */
1261 data = readl(mmio + PDC_SDRAM_CONTROL);
1262 if (!(data & (1<<19))) {
1263 error = 0;
1264 break;
1265 }
1266 msleep(i*100);
1267 }
1268 return error;
1269 }
1270
1271
1272 static unsigned int pdc20621_dimm_init(struct ata_host *host)
1273 {
1274 int speed, size, length;
1275 u32 addr, spd0, pci_status;
1276 u32 time_period = 0;
1277 u32 tcount = 0;
1278 u32 ticks = 0;
1279 u32 clock = 0;
1280 u32 fparam = 0;
1281 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1282
1283 /* hard-code chip #0 */
1284 mmio += PDC_CHIP0_OFS;
1285
1286 /* Initialize PLL based upon PCI Bus Frequency */
1287
1288 /* Initialize Time Period Register */
1289 writel(0xffffffff, mmio + PDC_TIME_PERIOD);
1290 time_period = readl(mmio + PDC_TIME_PERIOD);
1291 VPRINTK("Time Period Register (0x40): 0x%x\n", time_period);
1292
1293 /* Enable timer */
1294 writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
1295 readl(mmio + PDC_TIME_CONTROL);
1296
1297 /* Wait 3 seconds */
1298 msleep(3000);
1299
1300 /*
1301 When the timer is enabled, the counter is decremented every
1302 internal clock cycle.
1303 */
1304
1305 tcount = readl(mmio + PDC_TIME_COUNTER);
1306 VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);
1307
1308 /*
1309 If SX4 is on PCI-X bus, after 3 seconds, the timer counter
1310 register should be >= (0xffffffff - 3x10^8).
1311 */
1312 if (tcount >= PCI_X_TCOUNT) {
1313 ticks = (time_period - tcount);
1314 VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);
1315
1316 clock = (ticks / 300000);
1317 VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);
1318
1319 clock = (clock * 33);
1320 VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);
1321
1322 /* PLL F Param (bit 22:16) */
1323 fparam = (1400000 / clock) - 2;
1324 VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);
1325
1326 /* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
1327 pci_status = (0x8a001824 | (fparam << 16));
1328 } else
1329 pci_status = PCI_PLL_INIT;
1330
1331 /* Initialize PLL. */
1332 VPRINTK("pci_status: 0x%x\n", pci_status);
1333 writel(pci_status, mmio + PDC_CTL_STATUS);
1334 readl(mmio + PDC_CTL_STATUS);
1335
1336 /*
1337 Read SPD of DIMM by I2C interface,
1338 and program the DIMM Module Controller.
1339 */
1340 if (!(speed = pdc20621_detect_dimm(host))) {
1341 printk(KERN_ERR "Detect Local DIMM Fail\n");
1342 return 1; /* DIMM error */
1343 }
1344 VPRINTK("Local DIMM Speed = %d\n", speed);
1345
1346 /* Programming DIMM0 Module Control Register (index_CID0:80h) */
1347 size = pdc20621_prog_dimm0(host);
1348 VPRINTK("Local DIMM Size = %dMB\n", size);
1349
1350 /* Programming DIMM Module Global Control Register (index_CID0:88h) */
1351 if (pdc20621_prog_dimm_global(host)) {
1352 printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
1353 return 1;
1354 }
1355
1356 #ifdef ATA_VERBOSE_DEBUG
1357 {
1358 u8 test_parttern1[40] =
1359 {0x55,0xAA,'P','r','o','m','i','s','e',' ',
1360 'N','o','t',' ','Y','e','t',' ',
1361 'D','e','f','i','n','e','d',' ',
1362 '1','.','1','0',
1363 '9','8','0','3','1','6','1','2',0,0};
1364 u8 test_parttern2[40] = {0};
1365
1366 pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40);
1367 pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40);
1368
1369 pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40);
1370 pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
1371 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1372 test_parttern2[1], &(test_parttern2[2]));
1373 pdc20621_get_from_dimm(host, test_parttern2, 0x10040,
1374 40);
1375 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1376 test_parttern2[1], &(test_parttern2[2]));
1377
1378 pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40);
1379 pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
1380 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1381 test_parttern2[1], &(test_parttern2[2]));
1382 }
1383 #endif
1384
1385 /* ECC initialization. */
1386
1387 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1388 PDC_DIMM_SPD_TYPE, &spd0);
1389 if (spd0 == 0x02) {
1390 void *buf;
1391 VPRINTK("Start ECC initialization\n");
1392 addr = 0;
1393 length = size * 1024 * 1024;
1394 buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL);
/* treat a failed scrub-buffer allocation as a DIMM init error */
if (!buf)
return 1;
1395 while (addr < length) {
1396 pdc20621_put_to_dimm(host, buf, addr,
1397 ECC_ERASE_BUF_SZ);
1398 addr += ECC_ERASE_BUF_SZ;
1399 }
1400 kfree(buf);
1401 VPRINTK("Finish ECC initialization\n");
1402 }
1403 return 0;
1404 }
1405
1406
1407 static void pdc_20621_init(struct ata_host *host)
1408 {
1409 u32 tmp;
1410 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1411
1412 /* hard-code chip #0 */
1413 mmio += PDC_CHIP0_OFS;
1414
1415 /*
1416 * Select page 0x40 for our 32k DIMM window
1417 */
1418 tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
1419 tmp |= PDC_PAGE_WINDOW; /* page 40h; arbitrarily selected */
1420 writel(tmp, mmio + PDC_20621_DIMM_WINDOW);
1421
1422 /*
1423 * Reset Host DMA
1424 */
1425 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1426 tmp |= PDC_RESET;
1427 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1428 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1429
1430 udelay(10);
1431
1432 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1433 tmp &= ~PDC_RESET;
1434 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1435 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1436 }
1437
1438 static int pdc_sata_init_one(struct pci_dev *pdev,
1439 const struct pci_device_id *ent)
1440 {
1441 const struct ata_port_info *ppi[] =
1442 { &pdc_port_info[ent->driver_data], NULL };
1443 struct ata_host *host;
1444 struct pdc_host_priv *hpriv;
1445 int i, rc;
1446
1447 ata_print_version_once(&pdev->dev, DRV_VERSION);
1448
1449 /* allocate host */
1450 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
1451 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
1452 if (!host || !hpriv)
1453 return -ENOMEM;
1454
1455 host->private_data = hpriv;
1456
1457 /* acquire resources and fill host */
1458 rc = pcim_enable_device(pdev);
1459 if (rc)
1460 return rc;
1461
1462 rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
1463 DRV_NAME);
1464 if (rc == -EBUSY)
1465 pcim_pin_device(pdev);
1466 if (rc)
1467 return rc;
1468 host->iomap = pcim_iomap_table(pdev);
1469
1470 for (i = 0; i < 4; i++) {
1471 struct ata_port *ap = host->ports[i];
1472 void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
1473 unsigned int offset = 0x200 + i * 0x80;
1474
1475 pdc_sata_setup_port(&ap->ioaddr, base + offset);
1476
1477 ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
1478 ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm");
1479 ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port");
1480 }
1481
1482 /* configure and activate */
1483 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
1484 if (rc)
1485 return rc;
1486 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
1487 if (rc)
1488 return rc;
1489
1490 if (pdc20621_dimm_init(host))
1491 return -ENOMEM;
1492 pdc_20621_init(host);
1493
1494 pci_set_master(pdev);
1495 return ata_host_activate(host, pdev->irq, pdc20621_interrupt,
1496 IRQF_SHARED, &pdc_sata_sht);
1497 }
1498
1499 module_pci_driver(pdc_sata_pci_driver);
1500
1501 MODULE_AUTHOR("Jeff Garzik");
1502 MODULE_DESCRIPTION("Promise SATA low-level driver");
1503 MODULE_LICENSE("GPL");
1504 MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
1505 MODULE_VERSION(DRV_VERSION);