1 /*
2 * sata_sx4.c - Promise SATA
3 *
4 * Maintained by: Tejun Heo <tj@kernel.org>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/driver-api/libata.rst
28 *
29 * Hardware documentation available under NDA.
30 *
31 */
32
33 /*
34 Theory of operation
35 -------------------
36
37 The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy
38 engine, DIMM memory, and four ATA engines (one per SATA port).
39 Data is copied to/from DIMM memory by the HDMA engine, before
40 handing off to one (or more) of the ATA engines. The ATA
41 engines operate solely on DIMM memory.
42
43 The SX4 behaves like a PATA chip, with no SATA controls or
44 knowledge whatsoever, leading to the presumption that
45 PATA<->SATA bridges exist on SX4 boards, external to the
46 PDC20621 chip itself.
47
48 The chip is quite capable, supporting an XOR engine and linked
49 	hardware commands (permits a string of transactions to be
50 submitted and waited-on as a single unit), and an optional
51 microprocessor.
52
53 The limiting factor is largely software. This Linux driver was
54 written to multiplex the single HDMA engine to copy disk
55 transactions into a fixed DIMM memory space, from where an ATA
56 engine takes over. As a result, each WRITE looks like this:
57
58 submit HDMA packet to hardware
59 hardware copies data from system memory to DIMM
60 hardware raises interrupt
61
62 submit ATA packet to hardware
63 hardware executes ATA WRITE command, w/ data in DIMM
64 hardware raises interrupt
65
66 and each READ looks like this:
67
68 submit ATA packet to hardware
69 hardware executes ATA READ command, w/ data in DIMM
70 hardware raises interrupt
71
72 submit HDMA packet to hardware
73 hardware copies data from DIMM to system memory
74 hardware raises interrupt
75
76 This is a very slow, lock-step way of doing things that can
77 certainly be improved by motivated kernel hackers.
78
79 */
80
81 #include <linux/kernel.h>
82 #include <linux/module.h>
83 #include <linux/pci.h>
84 #include <linux/slab.h>
85 #include <linux/blkdev.h>
86 #include <linux/delay.h>
87 #include <linux/interrupt.h>
88 #include <linux/device.h>
89 #include <scsi/scsi_host.h>
90 #include <scsi/scsi_cmnd.h>
91 #include <linux/libata.h>
92 #include "sata_promise.h"
93
94 #define DRV_NAME "sata_sx4"
95 #define DRV_VERSION "0.12"
96
97
98 enum {
99 PDC_MMIO_BAR = 3,
100 PDC_DIMM_BAR = 4,
101
102 PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */
103
104 PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
105 PDC_HDMA_PKT_SUBMIT = 0x100, /* Host DMA packet pointer addr */
106 PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */
107 PDC_HDMA_CTLSTAT = 0x12C, /* Host DMA control / status */
108
109 PDC_CTLSTAT = 0x60, /* IDEn control / status */
110
111 PDC_20621_SEQCTL = 0x400,
112 PDC_20621_SEQMASK = 0x480,
113 PDC_20621_GENERAL_CTL = 0x484,
114 PDC_20621_PAGE_SIZE = (32 * 1024),
115
116 /* chosen, not constant, values; we design our own DIMM mem map */
117 PDC_20621_DIMM_WINDOW = 0x0C, /* page# for 32K DIMM window */
118 PDC_20621_DIMM_BASE = 0x00200000,
119 PDC_20621_DIMM_DATA = (64 * 1024),
120 PDC_DIMM_DATA_STEP = (256 * 1024),
121 PDC_DIMM_WINDOW_STEP = (8 * 1024),
122 PDC_DIMM_HOST_PRD = (6 * 1024),
123 PDC_DIMM_HOST_PKT = (128 * 0),
124 PDC_DIMM_HPKT_PRD = (128 * 1),
125 PDC_DIMM_ATA_PKT = (128 * 2),
126 PDC_DIMM_APKT_PRD = (128 * 3),
127 PDC_DIMM_HEADER_SZ = PDC_DIMM_APKT_PRD + 128,
128 PDC_PAGE_WINDOW = 0x40,
129 PDC_PAGE_DATA = PDC_PAGE_WINDOW +
130 (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
131 PDC_PAGE_SET = PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,
132
133 PDC_CHIP0_OFS = 0xC0000, /* offset of chip #0 */
134
135 PDC_20621_ERR_MASK = (1<<19) | (1<<20) | (1<<21) | (1<<22) |
136 (1<<23),
137
138 board_20621 = 0, /* FastTrak S150 SX4 */
139
140 PDC_MASK_INT = (1 << 10), /* HDMA/ATA mask int */
141 PDC_RESET = (1 << 11), /* HDMA/ATA reset */
142 PDC_DMA_ENABLE = (1 << 7), /* DMA start/stop */
143
144 PDC_MAX_HDMA = 32,
145 PDC_HDMA_Q_MASK = (PDC_MAX_HDMA - 1),
146
147 PDC_DIMM0_SPD_DEV_ADDRESS = 0x50,
148 PDC_DIMM1_SPD_DEV_ADDRESS = 0x51,
149 PDC_I2C_CONTROL = 0x48,
150 PDC_I2C_ADDR_DATA = 0x4C,
151 PDC_DIMM0_CONTROL = 0x80,
152 PDC_DIMM1_CONTROL = 0x84,
153 PDC_SDRAM_CONTROL = 0x88,
154 PDC_I2C_WRITE = 0, /* master -> slave */
155 PDC_I2C_READ = (1 << 6), /* master <- slave */
156 PDC_I2C_START = (1 << 7), /* start I2C proto */
157 PDC_I2C_MASK_INT = (1 << 5), /* mask I2C interrupt */
158 PDC_I2C_COMPLETE = (1 << 16), /* I2C normal compl. */
159 PDC_I2C_NO_ACK = (1 << 20), /* slave no-ack addr */
160 PDC_DIMM_SPD_SUBADDRESS_START = 0x00,
161 PDC_DIMM_SPD_SUBADDRESS_END = 0x7F,
162 PDC_DIMM_SPD_ROW_NUM = 3,
163 PDC_DIMM_SPD_COLUMN_NUM = 4,
164 PDC_DIMM_SPD_MODULE_ROW = 5,
165 PDC_DIMM_SPD_TYPE = 11,
166 PDC_DIMM_SPD_FRESH_RATE = 12,
167 PDC_DIMM_SPD_BANK_NUM = 17,
168 PDC_DIMM_SPD_CAS_LATENCY = 18,
169 PDC_DIMM_SPD_ATTRIBUTE = 21,
170 PDC_DIMM_SPD_ROW_PRE_CHARGE = 27,
171 PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28,
172 PDC_DIMM_SPD_RAS_CAS_DELAY = 29,
173 PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30,
174 PDC_DIMM_SPD_SYSTEM_FREQ = 126,
175 PDC_CTL_STATUS = 0x08,
176 PDC_DIMM_WINDOW_CTLR = 0x0C,
177 PDC_TIME_CONTROL = 0x3C,
178 PDC_TIME_PERIOD = 0x40,
179 PDC_TIME_COUNTER = 0x44,
180 PDC_GENERAL_CTLR = 0x484,
181 PCI_PLL_INIT = 0x8A531824,
182 PCI_X_TCOUNT = 0xEE1E5CFF,
183
184 /* PDC_TIME_CONTROL bits */
185 PDC_TIMER_BUZZER = (1 << 10),
186 PDC_TIMER_MODE_PERIODIC = 0, /* bits 9:8 == 00 */
187 PDC_TIMER_MODE_ONCE = (1 << 8), /* bits 9:8 == 01 */
188 PDC_TIMER_ENABLE = (1 << 7),
189 PDC_TIMER_MASK_INT = (1 << 5),
190 PDC_TIMER_SEQ_MASK = 0x1f, /* SEQ ID for timer */
191 PDC_TIMER_DEFAULT = PDC_TIMER_MODE_ONCE |
192 PDC_TIMER_ENABLE |
193 PDC_TIMER_MASK_INT,
194 };
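
/*
 * Per-port DIMM layout implied by the constants above (a summary derived
 * from this driver's own usage, not from chip documentation): each port
 * owns an 8K window at PDC_20621_DIMM_BASE + (portno * PDC_DIMM_WINDOW_STEP)
 * containing:
 *
 *	0x0000	Host DMA packet		(PDC_DIMM_HOST_PKT)
 *	0x0080	Host DMA S/G entry	(PDC_DIMM_HPKT_PRD)
 *	0x0100	ATA command packet	(PDC_DIMM_ATA_PKT)
 *	0x0180	ATA S/G entry		(PDC_DIMM_APKT_PRD)
 *	0x1800	host PRD table		(PDC_DIMM_HOST_PRD)
 *
 * Bulk data is staged separately at PDC_20621_DIMM_DATA +
 * (portno * PDC_DIMM_DATA_STEP).
 */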
195
196 #define ECC_ERASE_BUF_SZ (128 * 1024)
197
198 struct pdc_port_priv {
199 u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
200 u8 *pkt;
201 dma_addr_t pkt_dma;
202 };
203
204 struct pdc_host_priv {
205 unsigned int doing_hdma;
206 unsigned int hdma_prod;
207 unsigned int hdma_cons;
208 struct {
209 struct ata_queued_cmd *qc;
210 unsigned int seq;
211 unsigned long pkt_ofs;
212 } hdma[32];
213 };
214
215
216 static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
217 static void pdc_error_handler(struct ata_port *ap);
218 static void pdc_freeze(struct ata_port *ap);
219 static void pdc_thaw(struct ata_port *ap);
220 static int pdc_port_start(struct ata_port *ap);
221 static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
222 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
223 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
224 static unsigned int pdc20621_dimm_init(struct ata_host *host);
225 static int pdc20621_detect_dimm(struct ata_host *host);
226 static unsigned int pdc20621_i2c_read(struct ata_host *host,
227 u32 device, u32 subaddr, u32 *pdata);
228 static int pdc20621_prog_dimm0(struct ata_host *host);
229 static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
230 #ifdef ATA_VERBOSE_DEBUG
231 static void pdc20621_get_from_dimm(struct ata_host *host,
232 void *psource, u32 offset, u32 size);
233 #endif
234 static void pdc20621_put_to_dimm(struct ata_host *host,
235 void *psource, u32 offset, u32 size);
236 static void pdc20621_irq_clear(struct ata_port *ap);
237 static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc);
238 static int pdc_softreset(struct ata_link *link, unsigned int *class,
239 unsigned long deadline);
240 static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
241 static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
242
243
244 static struct scsi_host_template pdc_sata_sht = {
245 ATA_BASE_SHT(DRV_NAME),
246 .sg_tablesize = LIBATA_MAX_PRD,
247 .dma_boundary = ATA_DMA_BOUNDARY,
248 };
249
250 /* TODO: inherit from base port_ops after converting to new EH */
251 static struct ata_port_operations pdc_20621_ops = {
252 .inherits = &ata_sff_port_ops,
253
254 .check_atapi_dma = pdc_check_atapi_dma,
255 .qc_prep = pdc20621_qc_prep,
256 .qc_issue = pdc20621_qc_issue,
257
258 .freeze = pdc_freeze,
259 .thaw = pdc_thaw,
260 .softreset = pdc_softreset,
261 .error_handler = pdc_error_handler,
262 .lost_interrupt = ATA_OP_NULL,
263 .post_internal_cmd = pdc_post_internal_cmd,
264
265 .port_start = pdc_port_start,
266
267 .sff_tf_load = pdc_tf_load_mmio,
268 .sff_exec_command = pdc_exec_command_mmio,
269 .sff_irq_clear = pdc20621_irq_clear,
270 };
271
272 static const struct ata_port_info pdc_port_info[] = {
273 /* board_20621 */
274 {
275 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_ATAPI |
276 ATA_FLAG_PIO_POLLING,
277 .pio_mask = ATA_PIO4,
278 .mwdma_mask = ATA_MWDMA2,
279 .udma_mask = ATA_UDMA6,
280 .port_ops = &pdc_20621_ops,
281 },
282
283 };
284
285 static const struct pci_device_id pdc_sata_pci_tbl[] = {
286 { PCI_VDEVICE(PROMISE, 0x6622), board_20621 },
287
288 { } /* terminate list */
289 };
290
291 static struct pci_driver pdc_sata_pci_driver = {
292 .name = DRV_NAME,
293 .id_table = pdc_sata_pci_tbl,
294 .probe = pdc_sata_init_one,
295 .remove = ata_pci_remove_one,
296 };
297
298
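/*
 * Per-port setup at host bring-up: allocate the port private data and a
 * small DMA-coherent packet buffer.  Command packets are staged in
 * pp->dimm_buf and copied into DIMM memory at qc_prep time.
 */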
299 static int pdc_port_start(struct ata_port *ap)
300 {
301 struct device *dev = ap->host->dev;
302 struct pdc_port_priv *pp;
303
304 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
305 if (!pp)
306 return -ENOMEM;
307
308 pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
309 if (!pp->pkt)
310 return -ENOMEM;
311
312 ap->private_data = pp;
313
314 return 0;
315 }
316
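/*
 * Emit the single-entry S/G (PRD) table used by the ATA engine into the
 * staging buffer; it points at this port's data area in DIMM memory.
 */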
317 static inline void pdc20621_ata_sg(u8 *buf, unsigned int portno,
318 unsigned int total_len)
319 {
320 u32 addr;
321 unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
322 __le32 *buf32 = (__le32 *) buf;
323
324 /* output ATA packet S/G table */
325 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
326 (PDC_DIMM_DATA_STEP * portno);
327 VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr);
328 buf32[dw] = cpu_to_le32(addr);
329 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
330
331 VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n",
332 PDC_20621_DIMM_BASE +
333 (PDC_DIMM_WINDOW_STEP * portno) +
334 PDC_DIMM_APKT_PRD,
335 buf32[dw], buf32[dw + 1]);
336 }
337
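/*
 * Emit the single-entry S/G (PRD) table describing the DIMM side of the
 * Host DMA copy; like the ATA PRD, it covers this port's DIMM data area.
 */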
338 static inline void pdc20621_host_sg(u8 *buf, unsigned int portno,
339 unsigned int total_len)
340 {
341 u32 addr;
342 unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
343 __le32 *buf32 = (__le32 *) buf;
344
345 /* output Host DMA packet S/G table */
346 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
347 (PDC_DIMM_DATA_STEP * portno);
348
349 buf32[dw] = cpu_to_le32(addr);
350 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
351
352 VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n",
353 PDC_20621_DIMM_BASE +
354 (PDC_DIMM_WINDOW_STEP * portno) +
355 PDC_DIMM_HPKT_PRD,
356 buf32[dw], buf32[dw + 1]);
357 }
358
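/*
 * Build the ATA command packet in the staging buffer: direction flag,
 * sequence id, pointer to the ATA S/G table, device select and device
 * control bytes.  Returns the offset at which the taskfile registers are
 * appended by pdc_prep_lba28()/pdc_prep_lba48().
 */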
359 static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
360 unsigned int devno, u8 *buf,
361 unsigned int portno)
362 {
363 unsigned int i, dw;
364 __le32 *buf32 = (__le32 *) buf;
365 u8 dev_reg;
366
367 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
368 (PDC_DIMM_WINDOW_STEP * portno) +
369 PDC_DIMM_APKT_PRD;
370 VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
371
372 i = PDC_DIMM_ATA_PKT;
373
374 /*
375 * Set up ATA packet
376 */
377 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
378 buf[i++] = PDC_PKT_READ;
379 else if (tf->protocol == ATA_PROT_NODATA)
380 buf[i++] = PDC_PKT_NODATA;
381 else
382 buf[i++] = 0;
383 buf[i++] = 0; /* reserved */
384 buf[i++] = portno + 1; /* seq. id */
385 buf[i++] = 0xff; /* delay seq. id */
386
387 /* dimm dma S/G, and next-pkt */
388 dw = i >> 2;
389 if (tf->protocol == ATA_PROT_NODATA)
390 buf32[dw] = 0;
391 else
392 buf32[dw] = cpu_to_le32(dimm_sg);
393 buf32[dw + 1] = 0;
394 i += 8;
395
396 if (devno == 0)
397 dev_reg = ATA_DEVICE_OBS;
398 else
399 dev_reg = ATA_DEVICE_OBS | ATA_DEV1;
400
401 /* select device */
402 buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
403 buf[i++] = dev_reg;
404
405 /* device control register */
406 buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
407 buf[i++] = tf->ctl;
408
409 return i;
410 }
411
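/*
 * Build the Host DMA packet: direction flag, sequence id, and pointers to
 * the host-side and DIMM-side S/G tables.
 */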
412 static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
413 unsigned int portno)
414 {
415 unsigned int dw;
416 u32 tmp;
417 __le32 *buf32 = (__le32 *) buf;
418
419 unsigned int host_sg = PDC_20621_DIMM_BASE +
420 (PDC_DIMM_WINDOW_STEP * portno) +
421 PDC_DIMM_HOST_PRD;
422 unsigned int dimm_sg = PDC_20621_DIMM_BASE +
423 (PDC_DIMM_WINDOW_STEP * portno) +
424 PDC_DIMM_HPKT_PRD;
425 VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
426 VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg);
427
428 dw = PDC_DIMM_HOST_PKT >> 2;
429
430 /*
431 * Set up Host DMA packet
432 */
433 if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
434 tmp = PDC_PKT_READ;
435 else
436 tmp = 0;
437 tmp |= ((portno + 1 + 4) << 16); /* seq. id */
438 tmp |= (0xff << 24); /* delay seq. id */
439 buf32[dw + 0] = cpu_to_le32(tmp);
440 buf32[dw + 1] = cpu_to_le32(host_sg);
441 buf32[dw + 2] = cpu_to_le32(dimm_sg);
442 buf32[dw + 3] = 0;
443
444 VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n",
445 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) +
446 PDC_DIMM_HOST_PKT,
447 buf32[dw + 0],
448 buf32[dw + 1],
449 buf32[dw + 2],
450 buf32[dw + 3]);
451 }
452
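/*
 * Prepare a DMA command: build the host PRD table from the request's
 * scatterlist, build the ATA and Host DMA packets plus their one-entry
 * PRDs, then copy the whole header into this port's window in DIMM
 * memory through the DIMM MMIO BAR.
 */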
453 static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
454 {
455 struct scatterlist *sg;
456 struct ata_port *ap = qc->ap;
457 struct pdc_port_priv *pp = ap->private_data;
458 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
459 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
460 unsigned int portno = ap->port_no;
461 unsigned int i, si, idx, total_len = 0, sgt_len;
462 __le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
463
464 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
465
466 VPRINTK("ata%u: ENTER\n", ap->print_id);
467
468 /* hard-code chip #0 */
469 mmio += PDC_CHIP0_OFS;
470
471 /*
472 * Build S/G table
473 */
474 idx = 0;
475 for_each_sg(qc->sg, sg, qc->n_elem, si) {
476 buf[idx++] = cpu_to_le32(sg_dma_address(sg));
477 buf[idx++] = cpu_to_le32(sg_dma_len(sg));
478 total_len += sg_dma_len(sg);
479 }
480 buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
481 sgt_len = idx * 4;
482
483 /*
484 * Build ATA, host DMA packets
485 */
486 pdc20621_host_sg(&pp->dimm_buf[0], portno, total_len);
487 pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);
488
489 pdc20621_ata_sg(&pp->dimm_buf[0], portno, total_len);
490 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
491
492 if (qc->tf.flags & ATA_TFLAG_LBA48)
493 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
494 else
495 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
496
497 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
498
499 /* copy three S/G tables and two packets to DIMM MMIO window */
500 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
501 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
502 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
503 PDC_DIMM_HOST_PRD,
504 &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);
505
506 /* force host FIFO dump */
507 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
508
509 readl(dimm_mmio); /* MMIO PCI posting flush */
510
511 VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len);
512 }
513
514 static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
515 {
516 struct ata_port *ap = qc->ap;
517 struct pdc_port_priv *pp = ap->private_data;
518 void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
519 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
520 unsigned int portno = ap->port_no;
521 unsigned int i;
522
523 VPRINTK("ata%u: ENTER\n", ap->print_id);
524
525 /* hard-code chip #0 */
526 mmio += PDC_CHIP0_OFS;
527
528 i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);
529
530 if (qc->tf.flags & ATA_TFLAG_LBA48)
531 i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
532 else
533 i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);
534
535 pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);
536
537 /* copy three S/G tables and two packets to DIMM MMIO window */
538 memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
539 &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
540
541 /* force host FIFO dump */
542 writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);
543
544 readl(dimm_mmio); /* MMIO PCI posting flush */
545
546 VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
547 }
548
549 static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
550 {
551 switch (qc->tf.protocol) {
552 case ATA_PROT_DMA:
553 pdc20621_dma_prep(qc);
554 break;
555 case ATA_PROT_NODATA:
556 pdc20621_nodata_prep(qc);
557 break;
558 default:
559 break;
560 }
561 }
562
563 static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
564 unsigned int seq,
565 u32 pkt_ofs)
566 {
567 struct ata_port *ap = qc->ap;
568 struct ata_host *host = ap->host;
569 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
570
571 /* hard-code chip #0 */
572 mmio += PDC_CHIP0_OFS;
573
574 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
575 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
576
577 writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
578 readl(mmio + PDC_HDMA_PKT_SUBMIT); /* flush */
579 }
580
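/*
 * Only one Host DMA transaction may be outstanding at a time.  If the
 * engine is idle the packet is submitted immediately; otherwise it is
 * queued in a small software ring (PDC_MAX_HDMA entries) and submitted
 * from pdc20621_pop_hdma() when the current transfer completes.
 */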
581 static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
582 unsigned int seq,
583 u32 pkt_ofs)
584 {
585 struct ata_port *ap = qc->ap;
586 struct pdc_host_priv *pp = ap->host->private_data;
587 unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;
588
589 if (!pp->doing_hdma) {
590 __pdc20621_push_hdma(qc, seq, pkt_ofs);
591 pp->doing_hdma = 1;
592 return;
593 }
594
595 pp->hdma[idx].qc = qc;
596 pp->hdma[idx].seq = seq;
597 pp->hdma[idx].pkt_ofs = pkt_ofs;
598 pp->hdma_prod++;
599 }
600
601 static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
602 {
603 struct ata_port *ap = qc->ap;
604 struct pdc_host_priv *pp = ap->host->private_data;
605 unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;
606
607 /* if nothing on queue, we're done */
608 if (pp->hdma_prod == pp->hdma_cons) {
609 pp->doing_hdma = 0;
610 return;
611 }
612
613 __pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
614 pp->hdma[idx].pkt_ofs);
615 pp->hdma_cons++;
616 }
617
618 #ifdef ATA_VERBOSE_DEBUG
619 static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
620 {
621 struct ata_port *ap = qc->ap;
622 unsigned int port_no = ap->port_no;
623 void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
624
625 dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
626 dimm_mmio += PDC_DIMM_HOST_PKT;
627
628 printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio));
629 printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4));
630 printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8));
631 printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12));
632 }
633 #else
634 static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
635 #endif /* ATA_VERBOSE_DEBUG */
636
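/*
 * Kick off a prepared command.  Writes are HDMA-first: the data is staged
 * into DIMM memory before the ATA WRITE is issued (note the +4 sequence
 * id).  Everything else submits the ATA packet directly.
 */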
637 static void pdc20621_packet_start(struct ata_queued_cmd *qc)
638 {
639 struct ata_port *ap = qc->ap;
640 struct ata_host *host = ap->host;
641 unsigned int port_no = ap->port_no;
642 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
643 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
644 u8 seq = (u8) (port_no + 1);
645 unsigned int port_ofs;
646
647 /* hard-code chip #0 */
648 mmio += PDC_CHIP0_OFS;
649
650 VPRINTK("ata%u: ENTER\n", ap->print_id);
651
652 wmb(); /* flush PRD, pkt writes */
653
654 port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
655
656 /* if writing, we (1) DMA to DIMM, then (2) do ATA command */
657 if (rw && qc->tf.protocol == ATA_PROT_DMA) {
658 seq += 4;
659
660 pdc20621_dump_hdma(qc);
661 pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
662 VPRINTK("queued ofs 0x%x (%u), seq %u\n",
663 port_ofs + PDC_DIMM_HOST_PKT,
664 port_ofs + PDC_DIMM_HOST_PKT,
665 seq);
666 } else {
667 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
668 readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */
669
670 writel(port_ofs + PDC_DIMM_ATA_PKT,
671 ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
672 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
673 VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
674 port_ofs + PDC_DIMM_ATA_PKT,
675 port_ofs + PDC_DIMM_ATA_PKT,
676 seq);
677 }
678 }
679
680 static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc)
681 {
682 switch (qc->tf.protocol) {
683 case ATA_PROT_NODATA:
684 if (qc->tf.flags & ATA_TFLAG_POLLING)
685 break;
686 /*FALLTHROUGH*/
687 case ATA_PROT_DMA:
688 pdc20621_packet_start(qc);
689 return 0;
690
691 case ATAPI_PROT_DMA:
692 BUG();
693 break;
694
695 default:
696 break;
697 }
698
699 return ata_sff_qc_issue(qc);
700 }
701
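/*
 * Per-port completion handling, implementing the two-step sequence from
 * the theory-of-operation comment: reads run ATA then HDMA, writes run
 * HDMA then ATA, and the command is completed on the second interrupt.
 * No-data commands complete on their single interrupt.
 */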
702 static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
703 struct ata_queued_cmd *qc,
704 unsigned int doing_hdma,
705 void __iomem *mmio)
706 {
707 unsigned int port_no = ap->port_no;
708 unsigned int port_ofs =
709 PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
710 u8 status;
711 unsigned int handled = 0;
712
713 VPRINTK("ENTER\n");
714
715 if ((qc->tf.protocol == ATA_PROT_DMA) && /* read */
716 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
717
718 /* step two - DMA from DIMM to host */
719 if (doing_hdma) {
720 VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id,
721 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
722 /* get drive status; clear intr; complete txn */
723 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
724 ata_qc_complete(qc);
725 pdc20621_pop_hdma(qc);
726 }
727
728 /* step one - exec ATA command */
729 else {
730 u8 seq = (u8) (port_no + 1 + 4);
731 VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id,
732 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
733
734 /* submit hdma pkt */
735 pdc20621_dump_hdma(qc);
736 pdc20621_push_hdma(qc, seq,
737 port_ofs + PDC_DIMM_HOST_PKT);
738 }
739 handled = 1;
740
741 } else if (qc->tf.protocol == ATA_PROT_DMA) { /* write */
742
743 /* step one - DMA from host to DIMM */
744 if (doing_hdma) {
745 u8 seq = (u8) (port_no + 1);
746 VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id,
747 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
748
749 /* submit ata pkt */
750 writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
751 readl(mmio + PDC_20621_SEQCTL + (seq * 4));
752 writel(port_ofs + PDC_DIMM_ATA_PKT,
753 ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
754 readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
755 }
756
757 /* step two - execute ATA command */
758 else {
759 VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id,
760 readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
761 /* get drive status; clear intr; complete txn */
762 qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
763 ata_qc_complete(qc);
764 pdc20621_pop_hdma(qc);
765 }
766 handled = 1;
767
768 /* command completion, but no data xfer */
769 } else if (qc->tf.protocol == ATA_PROT_NODATA) {
770
771 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
772 DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
773 qc->err_mask |= ac_err_mask(status);
774 ata_qc_complete(qc);
775 handled = 1;
776
777 } else {
778 ap->stats.idle_irq++;
779 }
780
781 return handled;
782 }
783
784 static void pdc20621_irq_clear(struct ata_port *ap)
785 {
786 ioread8(ap->ioaddr.status_addr);
787 }
788
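/*
 * Interrupt handler.  Reading PDC_20621_SEQMASK returns (and clears) the
 * pending sequence interrupts; as used by this driver, sequence ids 1-4
 * belong to the four ATA engines and 5-8 to their Host DMA counterparts.
 */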
789 static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
790 {
791 struct ata_host *host = dev_instance;
792 struct ata_port *ap;
793 u32 mask = 0;
794 unsigned int i, tmp, port_no;
795 unsigned int handled = 0;
796 void __iomem *mmio_base;
797
798 VPRINTK("ENTER\n");
799
800 if (!host || !host->iomap[PDC_MMIO_BAR]) {
801 VPRINTK("QUICK EXIT\n");
802 return IRQ_NONE;
803 }
804
805 mmio_base = host->iomap[PDC_MMIO_BAR];
806
807 /* reading should also clear interrupts */
808 mmio_base += PDC_CHIP0_OFS;
809 mask = readl(mmio_base + PDC_20621_SEQMASK);
810 VPRINTK("mask == 0x%x\n", mask);
811
812 if (mask == 0xffffffff) {
813 VPRINTK("QUICK EXIT 2\n");
814 return IRQ_NONE;
815 }
816 mask &= 0xffff; /* only 16 tags possible */
817 if (!mask) {
818 VPRINTK("QUICK EXIT 3\n");
819 return IRQ_NONE;
820 }
821
822 spin_lock(&host->lock);
823
824 for (i = 1; i < 9; i++) {
825 port_no = i - 1;
826 if (port_no > 3)
827 port_no -= 4;
828 if (port_no >= host->n_ports)
829 ap = NULL;
830 else
831 ap = host->ports[port_no];
832 tmp = mask & (1 << i);
833 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
834 if (tmp && ap) {
835 struct ata_queued_cmd *qc;
836
837 qc = ata_qc_from_tag(ap, ap->link.active_tag);
838 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
839 handled += pdc20621_host_intr(ap, qc, (i > 4),
840 mmio_base);
841 }
842 }
843
844 spin_unlock(&host->lock);
845
846 VPRINTK("mask == 0x%x\n", mask);
847
848 VPRINTK("EXIT\n");
849
850 return IRQ_RETVAL(handled);
851 }
852
853 static void pdc_freeze(struct ata_port *ap)
854 {
855 void __iomem *mmio = ap->ioaddr.cmd_addr;
856 u32 tmp;
857
858 /* FIXME: if all 4 ATA engines are stopped, also stop HDMA engine */
859
860 tmp = readl(mmio + PDC_CTLSTAT);
861 tmp |= PDC_MASK_INT;
862 tmp &= ~PDC_DMA_ENABLE;
863 writel(tmp, mmio + PDC_CTLSTAT);
864 readl(mmio + PDC_CTLSTAT); /* flush */
865 }
866
867 static void pdc_thaw(struct ata_port *ap)
868 {
869 void __iomem *mmio = ap->ioaddr.cmd_addr;
870 u32 tmp;
871
872 /* FIXME: start HDMA engine, if zero ATA engines running */
873
874 /* clear IRQ */
875 ioread8(ap->ioaddr.status_addr);
876
877 /* turn IRQ back on */
878 tmp = readl(mmio + PDC_CTLSTAT);
879 tmp &= ~PDC_MASK_INT;
880 writel(tmp, mmio + PDC_CTLSTAT);
881 readl(mmio + PDC_CTLSTAT); /* flush */
882 }
883
884 static void pdc_reset_port(struct ata_port *ap)
885 {
886 void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
887 unsigned int i;
888 u32 tmp;
889
890 /* FIXME: handle HDMA copy engine */
891
892 for (i = 11; i > 0; i--) {
893 tmp = readl(mmio);
894 if (tmp & PDC_RESET)
895 break;
896
897 udelay(100);
898
899 tmp |= PDC_RESET;
900 writel(tmp, mmio);
901 }
902
903 tmp &= ~PDC_RESET;
904 writel(tmp, mmio);
905 readl(mmio); /* flush */
906 }
907
908 static int pdc_softreset(struct ata_link *link, unsigned int *class,
909 unsigned long deadline)
910 {
911 pdc_reset_port(link->ap);
912 return ata_sff_softreset(link, class, deadline);
913 }
914
915 static void pdc_error_handler(struct ata_port *ap)
916 {
917 if (!(ap->pflags & ATA_PFLAG_FROZEN))
918 pdc_reset_port(ap);
919
920 ata_sff_error_handler(ap);
921 }
922
923 static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
924 {
925 struct ata_port *ap = qc->ap;
926
927 /* make DMA engine forget about the failed command */
928 if (qc->flags & ATA_QCFLAG_FAILED)
929 pdc_reset_port(ap);
930 }
931
932 static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
933 {
934 u8 *scsicmd = qc->scsicmd->cmnd;
935 int pio = 1; /* atapi dma off by default */
936
937 /* Whitelist commands that may use DMA. */
938 switch (scsicmd[0]) {
939 case WRITE_12:
940 case WRITE_10:
941 case WRITE_6:
942 case READ_12:
943 case READ_10:
944 case READ_6:
945 case 0xad: /* READ_DVD_STRUCTURE */
946 case 0xbe: /* READ_CD */
947 pio = 0;
948 }
949 /* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
950 if (scsicmd[0] == WRITE_10) {
951 unsigned int lba =
952 (scsicmd[2] << 24) |
953 (scsicmd[3] << 16) |
954 (scsicmd[4] << 8) |
955 scsicmd[5];
956 if (lba >= 0xFFFF4FA2)
957 pio = 1;
958 }
959 return pio;
960 }
961
962 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
963 {
964 WARN_ON(tf->protocol == ATA_PROT_DMA ||
965 tf->protocol == ATAPI_PROT_DMA);
966 ata_sff_tf_load(ap, tf);
967 }
968
969
970 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
971 {
972 WARN_ON(tf->protocol == ATA_PROT_DMA ||
973 tf->protocol == ATAPI_PROT_DMA);
974 ata_sff_exec_command(ap, tf);
975 }
976
977
978 static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
979 {
980 port->cmd_addr = base;
981 port->data_addr = base;
982 port->feature_addr =
983 port->error_addr = base + 0x4;
984 port->nsect_addr = base + 0x8;
985 port->lbal_addr = base + 0xc;
986 port->lbam_addr = base + 0x10;
987 port->lbah_addr = base + 0x14;
988 port->device_addr = base + 0x18;
989 port->command_addr =
990 port->status_addr = base + 0x1c;
991 port->altstatus_addr =
992 port->ctl_addr = base + 0x38;
993 }
994
995
996 #ifdef ATA_VERBOSE_DEBUG
997 static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
998 u32 offset, u32 size)
999 {
1000 u32 window_size;
1001 u16 idx;
1002 u8 page_mask;
1003 long dist;
1004 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1005 void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
1006
1007 /* hard-code chip #0 */
1008 mmio += PDC_CHIP0_OFS;
1009
1010 page_mask = 0x00;
1011 window_size = 0x2000 * 4; /* 32K byte uchar size */
1012 idx = (u16) (offset / window_size);
1013
1014 writel(0x01, mmio + PDC_GENERAL_CTLR);
1015 readl(mmio + PDC_GENERAL_CTLR);
1016 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1017 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1018
1019 offset -= (idx * window_size);
1020 idx++;
1021 dist = ((long) (window_size - (offset + size))) >= 0 ? size :
1022 (long) (window_size - offset);
1023 memcpy_fromio(psource, dimm_mmio + offset / 4, dist);
1024
1025 psource += dist;
1026 size -= dist;
1027 for (; (long) size >= (long) window_size ;) {
1028 writel(0x01, mmio + PDC_GENERAL_CTLR);
1029 readl(mmio + PDC_GENERAL_CTLR);
1030 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1031 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1032 memcpy_fromio(psource, dimm_mmio, window_size / 4);
1033 psource += window_size;
1034 size -= window_size;
1035 idx++;
1036 }
1037
1038 if (size) {
1039 writel(0x01, mmio + PDC_GENERAL_CTLR);
1040 readl(mmio + PDC_GENERAL_CTLR);
1041 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1042 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1043 memcpy_fromio(psource, dimm_mmio, size / 4);
1044 }
1045 }
1046 #endif
1047
1048
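/*
 * Copy a buffer from system memory into DIMM memory, moving the 32K DIMM
 * window (PDC_DIMM_WINDOW_CTLR) along as the copy proceeds.
 */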
1049 static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
1050 u32 offset, u32 size)
1051 {
1052 u32 window_size;
1053 u16 idx;
1054 u8 page_mask;
1055 long dist;
1056 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1057 void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];
1058
1059 /* hard-code chip #0 */
1060 mmio += PDC_CHIP0_OFS;
1061
1062 page_mask = 0x00;
1063 window_size = 0x2000 * 4; /* 32K byte uchar size */
1064 idx = (u16) (offset / window_size);
1065
1066 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1067 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1068 offset -= (idx * window_size);
1069 idx++;
1070 dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
1071 (long) (window_size - offset);
1072 memcpy_toio(dimm_mmio + offset / 4, psource, dist);
1073 writel(0x01, mmio + PDC_GENERAL_CTLR);
1074 readl(mmio + PDC_GENERAL_CTLR);
1075
1076 psource += dist;
1077 size -= dist;
1078 for (; (long) size >= (long) window_size ;) {
1079 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1080 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1081 memcpy_toio(dimm_mmio, psource, window_size / 4);
1082 writel(0x01, mmio + PDC_GENERAL_CTLR);
1083 readl(mmio + PDC_GENERAL_CTLR);
1084 psource += window_size;
1085 size -= window_size;
1086 idx++;
1087 }
1088
1089 if (size) {
1090 writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
1091 readl(mmio + PDC_DIMM_WINDOW_CTLR);
1092 memcpy_toio(dimm_mmio, psource, size / 4);
1093 writel(0x01, mmio + PDC_GENERAL_CTLR);
1094 readl(mmio + PDC_GENERAL_CTLR);
1095 }
1096 }
1097
1098
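/*
 * Read one byte from a DIMM SPD EEPROM via the chip's I2C master.
 * Polls for completion and returns 1 on success, 0 on timeout.
 */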
1099 static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
1100 u32 subaddr, u32 *pdata)
1101 {
1102 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1103 u32 i2creg = 0;
1104 u32 status;
1105 u32 count = 0;
1106
1107 /* hard-code chip #0 */
1108 mmio += PDC_CHIP0_OFS;
1109
1110 i2creg |= device << 24;
1111 i2creg |= subaddr << 16;
1112
1113 /* Set the device and subaddress */
1114 writel(i2creg, mmio + PDC_I2C_ADDR_DATA);
1115 readl(mmio + PDC_I2C_ADDR_DATA);
1116
1117 /* Write Control to perform read operation, mask int */
1118 writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
1119 mmio + PDC_I2C_CONTROL);
1120
1121 for (count = 0; count <= 1000; count ++) {
1122 status = readl(mmio + PDC_I2C_CONTROL);
1123 if (status & PDC_I2C_COMPLETE) {
1124 status = readl(mmio + PDC_I2C_ADDR_DATA);
1125 break;
1126 } else if (count == 1000)
1127 return 0;
1128 }
1129
1130 *pdata = (status >> 8) & 0x000000ff;
1131 return 1;
1132 }
1133
1134
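/*
 * Probe the DIMM over I2C and report its speed grade (100 or 133 MHz)
 * from the SPD data; returns 0 if no usable DIMM is found.
 */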
1135 static int pdc20621_detect_dimm(struct ata_host *host)
1136 {
1137 u32 data = 0;
1138 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1139 PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
1140 if (data == 100)
1141 return 100;
1142 } else
1143 return 0;
1144
1145 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
1146 if (data <= 0x75)
1147 return 133;
1148 } else
1149 return 0;
1150
1151 return 0;
1152 }
1153
1154
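/*
 * Program the DIMM0 module control register from the SPD timing bytes
 * and return the detected module size in megabytes.
 */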
1155 static int pdc20621_prog_dimm0(struct ata_host *host)
1156 {
1157 u32 spd0[50];
1158 u32 data = 0;
1159 int size, i;
1160 u8 bdimmsize;
1161 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1162 static const struct {
1163 unsigned int reg;
1164 unsigned int ofs;
1165 } pdc_i2c_read_data [] = {
1166 { PDC_DIMM_SPD_TYPE, 11 },
1167 { PDC_DIMM_SPD_FRESH_RATE, 12 },
1168 { PDC_DIMM_SPD_COLUMN_NUM, 4 },
1169 { PDC_DIMM_SPD_ATTRIBUTE, 21 },
1170 { PDC_DIMM_SPD_ROW_NUM, 3 },
1171 { PDC_DIMM_SPD_BANK_NUM, 17 },
1172 { PDC_DIMM_SPD_MODULE_ROW, 5 },
1173 { PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
1174 { PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
1175 { PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
1176 { PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
1177 { PDC_DIMM_SPD_CAS_LATENCY, 18 },
1178 };
1179
1180 /* hard-code chip #0 */
1181 mmio += PDC_CHIP0_OFS;
1182
1183 for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
1184 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1185 pdc_i2c_read_data[i].reg,
1186 &spd0[pdc_i2c_read_data[i].ofs]);
1187
1188 data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
1189 data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
1190 ((((spd0[27] + 9) / 10) - 1) << 8) ;
1191 data |= (((((spd0[29] > spd0[28])
1192 ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
1193 data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;
1194
1195 if (spd0[18] & 0x08)
1196 data |= ((0x03) << 14);
1197 else if (spd0[18] & 0x04)
1198 data |= ((0x02) << 14);
1199 else if (spd0[18] & 0x01)
1200 data |= ((0x01) << 14);
1201 else
1202 data |= (0 << 14);
1203
1204 /*
1205 	   Calculate the DIMM size as a power of two (bdimmsize is the exponent)
1206 	   and fold it into the control word that programs the module's start/end address.
1207 */
1208
1209 bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
1210 size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */
1211 data |= (((size / 16) - 1) << 16);
1212 data |= (0 << 23);
1213 data |= 8;
1214 writel(data, mmio + PDC_DIMM0_CONTROL);
1215 readl(mmio + PDC_DIMM0_CONTROL);
1216 return size;
1217 }
1218
1219
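/*
 * Program the DIMM global (SDRAM) control register: load the defaults,
 * enable ECC if the SPD type byte reports it, then trigger DIMM
 * initialization and poll for it to finish.  Returns non-zero on error.
 */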
1220 static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
1221 {
1222 u32 data, spd0;
1223 int error, i;
1224 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1225
1226 /* hard-code chip #0 */
1227 mmio += PDC_CHIP0_OFS;
1228
1229 /*
1230 Set To Default : DIMM Module Global Control Register (0x022259F1)
1231 DIMM Arbitration Disable (bit 20)
1232 DIMM Data/Control Output Driving Selection (bit12 - bit15)
1233 Refresh Enable (bit 17)
1234 */
1235
1236 data = 0x022259F1;
1237 writel(data, mmio + PDC_SDRAM_CONTROL);
1238 readl(mmio + PDC_SDRAM_CONTROL);
1239
1240 /* Turn on for ECC */
1241 if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1242 PDC_DIMM_SPD_TYPE, &spd0)) {
1243 pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
1244 PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
1245 return 1;
1246 }
1247 if (spd0 == 0x02) {
1248 data |= (0x01 << 16);
1249 writel(data, mmio + PDC_SDRAM_CONTROL);
1250 readl(mmio + PDC_SDRAM_CONTROL);
1251 printk(KERN_ERR "Local DIMM ECC Enabled\n");
1252 }
1253
1254 /* DIMM Initialization Select/Enable (bit 18/19) */
1255 data &= (~(1<<18));
1256 data |= (1<<19);
1257 writel(data, mmio + PDC_SDRAM_CONTROL);
1258
1259 error = 1;
1260 for (i = 1; i <= 10; i++) { /* polling ~5 secs */
1261 data = readl(mmio + PDC_SDRAM_CONTROL);
1262 if (!(data & (1<<19))) {
1263 error = 0;
1264 break;
1265 }
1266 msleep(i*100);
1267 }
1268 return error;
1269 }
1270
1271
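/*
 * One-time DIMM bring-up: measure the PCI bus clock with the on-chip
 * timer to choose the PLL parameters, detect and program the DIMM from
 * its SPD data, and zero the DIMM contents when ECC is enabled.  Returns
 * non-zero on failure.
 */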
1272 static unsigned int pdc20621_dimm_init(struct ata_host *host)
1273 {
1274 int speed, size, length;
1275 u32 addr, spd0, pci_status;
1276 u32 time_period = 0;
1277 u32 tcount = 0;
1278 u32 ticks = 0;
1279 u32 clock = 0;
1280 u32 fparam = 0;
1281 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1282
1283 /* hard-code chip #0 */
1284 mmio += PDC_CHIP0_OFS;
1285
1286 /* Initialize PLL based upon PCI Bus Frequency */
1287
1288 /* Initialize Time Period Register */
1289 writel(0xffffffff, mmio + PDC_TIME_PERIOD);
1290 time_period = readl(mmio + PDC_TIME_PERIOD);
1291 VPRINTK("Time Period Register (0x40): 0x%x\n", time_period);
1292
1293 /* Enable timer */
1294 writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
1295 readl(mmio + PDC_TIME_CONTROL);
1296
1297 /* Wait 3 seconds */
1298 msleep(3000);
1299
1300 /*
1301 	   When the timer is enabled, the counter is decremented every
1302 	   internal clock cycle.
1303 */
1304
1305 tcount = readl(mmio + PDC_TIME_COUNTER);
1306 VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);
1307
1308 /*
1309 	   If the SX4 is on a PCI-X bus, then after 3 seconds the timer
1310 	   counter register should be >= (0xffffffff - 3x10^8).
1311 */
1312 if (tcount >= PCI_X_TCOUNT) {
1313 ticks = (time_period - tcount);
1314 VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);
1315
1316 clock = (ticks / 300000);
1317 VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);
1318
1319 clock = (clock * 33);
1320 VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);
1321
1322 /* PLL F Param (bit 22:16) */
1323 fparam = (1400000 / clock) - 2;
1324 VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);
1325
1326 /* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
1327 pci_status = (0x8a001824 | (fparam << 16));
1328 } else
1329 pci_status = PCI_PLL_INIT;
1330
1331 /* Initialize PLL. */
1332 VPRINTK("pci_status: 0x%x\n", pci_status);
1333 writel(pci_status, mmio + PDC_CTL_STATUS);
1334 readl(mmio + PDC_CTL_STATUS);
1335
1336 /*
1337 	   Read the SPD data of the DIMM over the I2C interface,
1338 	   and program the DIMM Module Controller.
1339 */
1340 if (!(speed = pdc20621_detect_dimm(host))) {
1341 printk(KERN_ERR "Detect Local DIMM Fail\n");
1342 return 1; /* DIMM error */
1343 }
1344 VPRINTK("Local DIMM Speed = %d\n", speed);
1345
1346 /* Programming DIMM0 Module Control Register (index_CID0:80h) */
1347 size = pdc20621_prog_dimm0(host);
1348 VPRINTK("Local DIMM Size = %dMB\n", size);
1349
1350 /* Programming DIMM Module Global Control Register (index_CID0:88h) */
1351 if (pdc20621_prog_dimm_global(host)) {
1352 printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
1353 return 1;
1354 }
1355
1356 #ifdef ATA_VERBOSE_DEBUG
1357 {
1358 u8 test_parttern1[40] =
1359 {0x55,0xAA,'P','r','o','m','i','s','e',' ',
1360 'N','o','t',' ','Y','e','t',' ',
1361 'D','e','f','i','n','e','d',' ',
1362 '1','.','1','0',
1363 '9','8','0','3','1','6','1','2',0,0};
1364 u8 test_parttern2[40] = {0};
1365
1366 pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40);
1367 pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40);
1368
1369 pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40);
1370 pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
1371 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1372 test_parttern2[1], &(test_parttern2[2]));
1373 pdc20621_get_from_dimm(host, test_parttern2, 0x10040,
1374 40);
1375 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1376 test_parttern2[1], &(test_parttern2[2]));
1377
1378 pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40);
1379 pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
1380 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1381 test_parttern2[1], &(test_parttern2[2]));
1382 }
1383 #endif
1384
1385 	/* ECC initialization. */
1386
1387 if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1388 PDC_DIMM_SPD_TYPE, &spd0)) {
1389 pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
1390 PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
1391 return 1;
1392 }
1393 if (spd0 == 0x02) {
1394 void *buf;
1395 VPRINTK("Start ECC initialization\n");
1396 addr = 0;
1397 length = size * 1024 * 1024;
1398 buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL);
1399 if (!buf)
1400 return 1;
1401 while (addr < length) {
1402 pdc20621_put_to_dimm(host, buf, addr,
1403 ECC_ERASE_BUF_SZ);
1404 addr += ECC_ERASE_BUF_SZ;
1405 }
1406 kfree(buf);
1407 VPRINTK("Finish ECC initialization\n");
1408 }
1409 return 0;
1410 }
1411
1412
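/*
 * Basic controller init: select page 0x40 for the 32K DIMM window and
 * pulse the Host DMA engine reset bit.
 */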
1413 static void pdc_20621_init(struct ata_host *host)
1414 {
1415 u32 tmp;
1416 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1417
1418 /* hard-code chip #0 */
1419 mmio += PDC_CHIP0_OFS;
1420
1421 /*
1422 * Select page 0x40 for our 32k DIMM window
1423 */
1424 tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
1425 tmp |= PDC_PAGE_WINDOW; /* page 40h; arbitrarily selected */
1426 writel(tmp, mmio + PDC_20621_DIMM_WINDOW);
1427
1428 /*
1429 * Reset Host DMA
1430 */
1431 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1432 tmp |= PDC_RESET;
1433 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1434 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1435
1436 udelay(10);
1437
1438 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1439 tmp &= ~PDC_RESET;
1440 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1441 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1442 }
1443
1444 static int pdc_sata_init_one(struct pci_dev *pdev,
1445 const struct pci_device_id *ent)
1446 {
1447 const struct ata_port_info *ppi[] =
1448 { &pdc_port_info[ent->driver_data], NULL };
1449 struct ata_host *host;
1450 struct pdc_host_priv *hpriv;
1451 int i, rc;
1452
1453 ata_print_version_once(&pdev->dev, DRV_VERSION);
1454
1455 /* allocate host */
1456 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
1457 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
1458 if (!host || !hpriv)
1459 return -ENOMEM;
1460
1461 host->private_data = hpriv;
1462
1463 /* acquire resources and fill host */
1464 rc = pcim_enable_device(pdev);
1465 if (rc)
1466 return rc;
1467
1468 rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
1469 DRV_NAME);
1470 if (rc == -EBUSY)
1471 pcim_pin_device(pdev);
1472 if (rc)
1473 return rc;
1474 host->iomap = pcim_iomap_table(pdev);
1475
1476 for (i = 0; i < 4; i++) {
1477 struct ata_port *ap = host->ports[i];
1478 void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
1479 unsigned int offset = 0x200 + i * 0x80;
1480
1481 pdc_sata_setup_port(&ap->ioaddr, base + offset);
1482
1483 ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
1484 ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm");
1485 ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port");
1486 }
1487
1488 /* configure and activate */
1489 rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
1490 if (rc)
1491 return rc;
1492 rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
1493 if (rc)
1494 return rc;
1495
1496 if (pdc20621_dimm_init(host))
1497 return -ENOMEM;
1498 pdc_20621_init(host);
1499
1500 pci_set_master(pdev);
1501 return ata_host_activate(host, pdev->irq, pdc20621_interrupt,
1502 IRQF_SHARED, &pdc_sata_sht);
1503 }
1504
1505 module_pci_driver(pdc_sata_pci_driver);
1506
1507 MODULE_AUTHOR("Jeff Garzik");
1508 MODULE_DESCRIPTION("Promise SATA low-level driver");
1509 MODULE_LICENSE("GPL");
1510 MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
1511 MODULE_VERSION(DRV_VERSION);