/*
 * drivers/ata/sata_sx4.c
 * (source imported from commit: "include cleanup: Update gfp.h and slab.h
 *  includes to prepare for breaking implicit slab.h inclusion from percpu.h")
 */
1 /*
2 * sata_sx4.c - Promise SATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * Hardware documentation available under NDA.
30 *
31 */
32
33 /*
34 Theory of operation
35 -------------------
36
37 The SX4 (PDC20621) chip features a single Host DMA (HDMA) copy
38 engine, DIMM memory, and four ATA engines (one per SATA port).
39 Data is copied to/from DIMM memory by the HDMA engine, before
40 handing off to one (or more) of the ATA engines. The ATA
41 engines operate solely on DIMM memory.
42
43 The SX4 behaves like a PATA chip, with no SATA controls or
44 knowledge whatsoever, leading to the presumption that
45 PATA<->SATA bridges exist on SX4 boards, external to the
46 PDC20621 chip itself.
47
48 The chip is quite capable, supporting an XOR engine and linked
49 hardware commands (permits a string to transactions to be
50 submitted and waited-on as a single unit), and an optional
51 microprocessor.
52
53 The limiting factor is largely software. This Linux driver was
54 written to multiplex the single HDMA engine to copy disk
55 transactions into a fixed DIMM memory space, from where an ATA
56 engine takes over. As a result, each WRITE looks like this:
57
58 submit HDMA packet to hardware
59 hardware copies data from system memory to DIMM
60 hardware raises interrupt
61
62 submit ATA packet to hardware
63 hardware executes ATA WRITE command, w/ data in DIMM
64 hardware raises interrupt
65
66 and each READ looks like this:
67
68 submit ATA packet to hardware
69 hardware executes ATA READ command, w/ data in DIMM
70 hardware raises interrupt
71
72 submit HDMA packet to hardware
73 hardware copies data from DIMM to system memory
74 hardware raises interrupt
75
76 This is a very slow, lock-step way of doing things that can
77 certainly be improved by motivated kernel hackers.
78
79 */
80
81 #include <linux/kernel.h>
82 #include <linux/module.h>
83 #include <linux/pci.h>
84 #include <linux/slab.h>
85 #include <linux/init.h>
86 #include <linux/blkdev.h>
87 #include <linux/delay.h>
88 #include <linux/interrupt.h>
89 #include <linux/device.h>
90 #include <scsi/scsi_host.h>
91 #include <scsi/scsi_cmnd.h>
92 #include <linux/libata.h>
93 #include "sata_promise.h"
94
95 #define DRV_NAME "sata_sx4"
96 #define DRV_VERSION "0.12"
97
98
/* Register offsets, DIMM memory-map layout, and hardware bit definitions. */
enum {
	PDC_MMIO_BAR		= 3,		/* PCI BAR holding chip MMIO registers */
	PDC_DIMM_BAR		= 4,		/* PCI BAR holding the DIMM memory window */

	PDC_PRD_TBL		= 0x44,	/* Direct command DMA table addr */

	/* NOTE(review): PDC_PKT_SUBMIT and PDC_INT_SEQMASK share offset 0x40;
	 * presumably one is per-port (via ioaddr.cmd_addr) and one is global —
	 * confirm against hardware docs. */
	PDC_PKT_SUBMIT		= 0x40, /* Command packet pointer addr */
	PDC_HDMA_PKT_SUBMIT	= 0x100, /* Host DMA packet pointer addr */
	PDC_INT_SEQMASK		= 0x40,	/* Mask of asserted SEQ INTs */
	PDC_HDMA_CTLSTAT	= 0x12C,	/* Host DMA control / status */

	PDC_CTLSTAT		= 0x60,	/* IDEn control / status */

	PDC_20621_SEQCTL	= 0x400,
	PDC_20621_SEQMASK	= 0x480,
	PDC_20621_GENERAL_CTL	= 0x484,
	PDC_20621_PAGE_SIZE	= (32 * 1024),

	/* chosen, not constant, values; we design our own DIMM mem map */
	PDC_20621_DIMM_WINDOW	= 0x0C,	/* page# for 32K DIMM window */
	PDC_20621_DIMM_BASE	= 0x00200000,
	PDC_20621_DIMM_DATA	= (64 * 1024),
	PDC_DIMM_DATA_STEP	= (256 * 1024),
	PDC_DIMM_WINDOW_STEP	= (8 * 1024),
	PDC_DIMM_HOST_PRD	= (6 * 1024),
	/* per-port header area: four 128-byte slots, then the host PRD table */
	PDC_DIMM_HOST_PKT	= (128 * 0),
	PDC_DIMM_HPKT_PRD	= (128 * 1),
	PDC_DIMM_ATA_PKT	= (128 * 2),
	PDC_DIMM_APKT_PRD	= (128 * 3),
	PDC_DIMM_HEADER_SZ	= PDC_DIMM_APKT_PRD + 128,
	PDC_PAGE_WINDOW		= 0x40,
	PDC_PAGE_DATA		= PDC_PAGE_WINDOW +
				  (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE),
	PDC_PAGE_SET		= PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE,

	PDC_CHIP0_OFS		= 0xC0000, /* offset of chip #0 */

	PDC_20621_ERR_MASK	= (1<<19) | (1<<20) | (1<<21) | (1<<22) |
				  (1<<23),

	board_20621		= 0,	/* FastTrak S150 SX4 */

	PDC_MASK_INT		= (1 << 10), /* HDMA/ATA mask int */
	PDC_RESET		= (1 << 11), /* HDMA/ATA reset */
	PDC_DMA_ENABLE		= (1 << 7),  /* DMA start/stop */

	PDC_MAX_HDMA		= 32,
	PDC_HDMA_Q_MASK		= (PDC_MAX_HDMA - 1), /* ring index mask; requires power-of-2 queue */

	/* SPD EEPROM I2C addresses and SDRAM controller registers */
	PDC_DIMM0_SPD_DEV_ADDRESS	= 0x50,
	PDC_DIMM1_SPD_DEV_ADDRESS	= 0x51,
	PDC_I2C_CONTROL			= 0x48,
	PDC_I2C_ADDR_DATA		= 0x4C,
	PDC_DIMM0_CONTROL		= 0x80,
	PDC_DIMM1_CONTROL		= 0x84,
	PDC_SDRAM_CONTROL		= 0x88,
	PDC_I2C_WRITE			= 0,		/* master -> slave */
	PDC_I2C_READ			= (1 << 6),	/* master <- slave */
	PDC_I2C_START			= (1 << 7),	/* start I2C proto */
	PDC_I2C_MASK_INT		= (1 << 5),	/* mask I2C interrupt */
	PDC_I2C_COMPLETE		= (1 << 16),	/* I2C normal compl. */
	PDC_I2C_NO_ACK			= (1 << 20),	/* slave no-ack addr */
	/* byte offsets within the JEDEC SPD EEPROM contents */
	PDC_DIMM_SPD_SUBADDRESS_START	= 0x00,
	PDC_DIMM_SPD_SUBADDRESS_END	= 0x7F,
	PDC_DIMM_SPD_ROW_NUM		= 3,
	PDC_DIMM_SPD_COLUMN_NUM		= 4,
	PDC_DIMM_SPD_MODULE_ROW		= 5,
	PDC_DIMM_SPD_TYPE		= 11,
	PDC_DIMM_SPD_FRESH_RATE		= 12,
	PDC_DIMM_SPD_BANK_NUM		= 17,
	PDC_DIMM_SPD_CAS_LATENCY	= 18,
	PDC_DIMM_SPD_ATTRIBUTE		= 21,
	PDC_DIMM_SPD_ROW_PRE_CHARGE	= 27,
	PDC_DIMM_SPD_ROW_ACTIVE_DELAY	= 28,
	PDC_DIMM_SPD_RAS_CAS_DELAY	= 29,
	PDC_DIMM_SPD_ACTIVE_PRECHARGE	= 30,
	PDC_DIMM_SPD_SYSTEM_FREQ	= 126,
	PDC_CTL_STATUS			= 0x08,
	PDC_DIMM_WINDOW_CTLR		= 0x0C,
	PDC_TIME_CONTROL		= 0x3C,
	PDC_TIME_PERIOD			= 0x40,
	PDC_TIME_COUNTER		= 0x44,
	PDC_GENERAL_CTLR		= 0x484,
	PCI_PLL_INIT			= 0x8A531824,
	PCI_X_TCOUNT			= 0xEE1E5CFF,

	/* PDC_TIME_CONTROL bits */
	PDC_TIMER_BUZZER		= (1 << 10),
	PDC_TIMER_MODE_PERIODIC		= 0,		/* bits 9:8 == 00 */
	PDC_TIMER_MODE_ONCE		= (1 << 8),	/* bits 9:8 == 01 */
	PDC_TIMER_ENABLE		= (1 << 7),
	PDC_TIMER_MASK_INT		= (1 << 5),
	PDC_TIMER_SEQ_MASK		= 0x1f,		/* SEQ ID for timer */
	PDC_TIMER_DEFAULT		= PDC_TIMER_MODE_ONCE |
					  PDC_TIMER_ENABLE |
					  PDC_TIMER_MASK_INT,
};
196
197 #define ECC_ERASE_BUF_SZ (128 * 1024)
198
/* Per-port private state, allocated in pdc_port_start(). */
struct pdc_port_priv {
	/* staging area for ATA/HDMA packets and S/G tables before they are
	 * copied to the DIMM via memcpy_toio() */
	u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512];
	u8 *pkt;		/* 128-byte DMA-coherent packet buffer */
	dma_addr_t pkt_dma;	/* bus address of @pkt */
};
204
/*
 * Per-host private state: software queue that multiplexes the single
 * HDMA copy engine across ports (see pdc20621_push_hdma/pop_hdma).
 */
struct pdc_host_priv {
	unsigned int doing_hdma;	/* nonzero while an HDMA transfer is in flight */
	unsigned int hdma_prod;		/* ring producer index (masked with PDC_HDMA_Q_MASK) */
	unsigned int hdma_cons;		/* ring consumer index */
	struct {
		struct ata_queued_cmd *qc;	/* deferred command */
		unsigned int seq;		/* SEQ id to program */
		unsigned long pkt_ofs;		/* DIMM offset of the HDMA packet */
	} hdma[32];			/* ring size == PDC_MAX_HDMA */
};
215
216
217 static int pdc_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
218 static void pdc_error_handler(struct ata_port *ap);
219 static void pdc_freeze(struct ata_port *ap);
220 static void pdc_thaw(struct ata_port *ap);
221 static int pdc_port_start(struct ata_port *ap);
222 static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
223 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
224 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
225 static unsigned int pdc20621_dimm_init(struct ata_host *host);
226 static int pdc20621_detect_dimm(struct ata_host *host);
227 static unsigned int pdc20621_i2c_read(struct ata_host *host,
228 u32 device, u32 subaddr, u32 *pdata);
229 static int pdc20621_prog_dimm0(struct ata_host *host);
230 static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
231 #ifdef ATA_VERBOSE_DEBUG
232 static void pdc20621_get_from_dimm(struct ata_host *host,
233 void *psource, u32 offset, u32 size);
234 #endif
235 static void pdc20621_put_to_dimm(struct ata_host *host,
236 void *psource, u32 offset, u32 size);
237 static void pdc20621_irq_clear(struct ata_port *ap);
238 static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc);
239 static int pdc_softreset(struct ata_link *link, unsigned int *class,
240 unsigned long deadline);
241 static void pdc_post_internal_cmd(struct ata_queued_cmd *qc);
242 static int pdc_check_atapi_dma(struct ata_queued_cmd *qc);
243
244
/* SCSI host template: stock libata SHT plus our S/G and DMA limits. */
static struct scsi_host_template pdc_sata_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
};
250
251 /* TODO: inherit from base port_ops after converting to new EH */
/* TODO: inherit from base port_ops after converting to new EH */
static struct ata_port_operations pdc_20621_ops = {
	.inherits		= &ata_sff_port_ops,

	.check_atapi_dma	= pdc_check_atapi_dma,
	.qc_prep		= pdc20621_qc_prep,
	.qc_issue		= pdc20621_qc_issue,

	.freeze			= pdc_freeze,
	.thaw			= pdc_thaw,
	.softreset		= pdc_softreset,
	.error_handler		= pdc_error_handler,
	.lost_interrupt		= ATA_OP_NULL,
	.post_internal_cmd	= pdc_post_internal_cmd,

	.port_start		= pdc_port_start,

	/* taskfile load/exec are wrapped to assert no DMA protocol sneaks
	 * through the SFF path (see pdc_tf_load_mmio) */
	.sff_tf_load		= pdc_tf_load_mmio,
	.sff_exec_command	= pdc_exec_command_mmio,
	.sff_irq_clear		= pdc20621_irq_clear,
};
272
/* Port capabilities, indexed by board id (only board_20621 exists). */
static const struct ata_port_info pdc_port_info[] = {
	/* board_20621 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_SRST | ATA_FLAG_MMIO |
				  ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &pdc_20621_ops,
	},

};
286
/* PCI IDs handled by this driver (Promise PDC20621-based boards). */
static const struct pci_device_id pdc_sata_pci_tbl[] = {
	{ PCI_VDEVICE(PROMISE, 0x6622), board_20621 },

	{ }	/* terminate list */
};
292
/* PCI driver glue; removal is handled entirely by libata. */
static struct pci_driver pdc_sata_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= pdc_sata_pci_tbl,
	.probe			= pdc_sata_init_one,
	.remove			= ata_pci_remove_one,
};
299
300
301 static int pdc_port_start(struct ata_port *ap)
302 {
303 struct device *dev = ap->host->dev;
304 struct pdc_port_priv *pp;
305 int rc;
306
307 rc = ata_port_start(ap);
308 if (rc)
309 return rc;
310
311 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
312 if (!pp)
313 return -ENOMEM;
314
315 pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL);
316 if (!pp->pkt)
317 return -ENOMEM;
318
319 ap->private_data = pp;
320
321 return 0;
322 }
323
324 static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf,
325 unsigned int portno,
326 unsigned int total_len)
327 {
328 u32 addr;
329 unsigned int dw = PDC_DIMM_APKT_PRD >> 2;
330 __le32 *buf32 = (__le32 *) buf;
331
332 /* output ATA packet S/G table */
333 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
334 (PDC_DIMM_DATA_STEP * portno);
335 VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr);
336 buf32[dw] = cpu_to_le32(addr);
337 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
338
339 VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n",
340 PDC_20621_DIMM_BASE +
341 (PDC_DIMM_WINDOW_STEP * portno) +
342 PDC_DIMM_APKT_PRD,
343 buf32[dw], buf32[dw + 1]);
344 }
345
346 static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf,
347 unsigned int portno,
348 unsigned int total_len)
349 {
350 u32 addr;
351 unsigned int dw = PDC_DIMM_HPKT_PRD >> 2;
352 __le32 *buf32 = (__le32 *) buf;
353
354 /* output Host DMA packet S/G table */
355 addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
356 (PDC_DIMM_DATA_STEP * portno);
357
358 buf32[dw] = cpu_to_le32(addr);
359 buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
360
361 VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n",
362 PDC_20621_DIMM_BASE +
363 (PDC_DIMM_WINDOW_STEP * portno) +
364 PDC_DIMM_HPKT_PRD,
365 buf32[dw], buf32[dw + 1]);
366 }
367
/*
 * Build the ATA command packet in @buf at offset PDC_DIMM_ATA_PKT.
 * @devno selects master/slave for the device-select byte; @portno
 * determines the SEQ id and the DIMM address of the packet's S/G table.
 * Returns the buffer index just past what was written, so the caller
 * can append the taskfile (pdc_prep_lba28/48) and footer.
 */
static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
					    unsigned int devno, u8 *buf,
					    unsigned int portno)
{
	unsigned int i, dw;
	__le32 *buf32 = (__le32 *) buf;
	u8 dev_reg;

	/* DIMM address of this port's ATA-packet S/G table */
	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
			       (PDC_DIMM_WINDOW_STEP * portno) +
			       PDC_DIMM_APKT_PRD;
	VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);

	i = PDC_DIMM_ATA_PKT;

	/*
	 * Set up ATA packet
	 */
	/* first byte: direction/type flag */
	if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
		buf[i++] = PDC_PKT_READ;
	else if (tf->protocol == ATA_PROT_NODATA)
		buf[i++] = PDC_PKT_NODATA;
	else
		buf[i++] = 0;		/* DMA write */
	buf[i++] = 0;			/* reserved */
	buf[i++] = portno + 1;		/* seq. id */
	buf[i++] = 0xff;		/* delay seq. id */

	/* dimm dma S/G, and next-pkt */
	dw = i >> 2;			/* reinterpret current offset as a dword index */
	if (tf->protocol == ATA_PROT_NODATA)
		buf32[dw] = 0;		/* no data: no S/G table */
	else
		buf32[dw] = cpu_to_le32(dimm_sg);
	buf32[dw + 1] = 0;		/* no linked next-packet */
	i += 8;				/* skip the two dwords just written */

	if (devno == 0)
		dev_reg = ATA_DEVICE_OBS;
	else
		dev_reg = ATA_DEVICE_OBS | ATA_DEV1;

	/* select device */
	buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE;
	buf[i++] = dev_reg;

	/* device control register */
	buf[i++] = (1 << 5) | PDC_REG_DEVCTL;
	buf[i++] = tf->ctl;

	return i;
}
420
/*
 * Build the Host DMA packet in @buf at offset PDC_DIMM_HOST_PKT.
 * The packet is four dwords: flags/seq word, host-side S/G address,
 * DIMM-side S/G address, and a zero terminator.  HDMA SEQ ids are the
 * port's ATA SEQ id shifted up by 4 (ports use 1-4, HDMA uses 5-8).
 */
static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
				     unsigned int portno)
{
	unsigned int dw;
	u32 tmp;
	__le32 *buf32 = (__le32 *) buf;

	/* DIMM addresses of this port's host and HDMA S/G tables */
	unsigned int host_sg = PDC_20621_DIMM_BASE +
			       (PDC_DIMM_WINDOW_STEP * portno) +
			       PDC_DIMM_HOST_PRD;
	unsigned int dimm_sg = PDC_20621_DIMM_BASE +
			       (PDC_DIMM_WINDOW_STEP * portno) +
			       PDC_DIMM_HPKT_PRD;
	VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
	VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg);

	dw = PDC_DIMM_HOST_PKT >> 2;

	/*
	 * Set up Host DMA packet
	 */
	if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE)))
		tmp = PDC_PKT_READ;
	else
		tmp = 0;
	tmp |= ((portno + 1 + 4) << 16);	/* seq. id */
	tmp |= (0xff << 24);			/* delay seq. id */
	buf32[dw + 0] = cpu_to_le32(tmp);
	buf32[dw + 1] = cpu_to_le32(host_sg);
	buf32[dw + 2] = cpu_to_le32(dimm_sg);
	buf32[dw + 3] = 0;			/* terminator */

	VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n",
		PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) +
		PDC_DIMM_HOST_PKT,
		buf32[dw + 0],
		buf32[dw + 1],
		buf32[dw + 2],
		buf32[dw + 3]);
}
461
/*
 * Prepare a DMA command: build the host-side S/G table plus the ATA and
 * HDMA packets in the staging buffer, then copy everything into this
 * port's window of the on-board DIMM via MMIO.
 */
static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	struct ata_port *ap = qc->ap;
	struct pdc_port_priv *pp = ap->private_data;
	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
	unsigned int portno = ap->port_no;
	unsigned int i, si, idx, total_len = 0, sgt_len;
	/* the host PRD table is staged just past the fixed header area */
	__le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));

	VPRINTK("ata%u: ENTER\n", ap->print_id);

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/*
	 * Build S/G table: one (addr, len) dword pair per scatterlist entry.
	 */
	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		buf[idx++] = cpu_to_le32(sg_dma_address(sg));
		buf[idx++] = cpu_to_le32(sg_dma_len(sg));
		total_len += sg_dma_len(sg);
	}
	/* mark the last entry as end-of-table */
	buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT);
	sgt_len = idx * 4;		/* table size in bytes */

	/*
	 * Build ATA, host DMA packets
	 */
	pdc20621_host_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
	pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno);

	pdc20621_ata_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len);
	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);

	if (qc->tf.flags & ATA_TFLAG_LBA48)
		i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
	else
		i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);

	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);

	/* copy three S/G tables and two packets to DIMM MMIO window */
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
		    &pp->dimm_buf, PDC_DIMM_HEADER_SZ);
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) +
		    PDC_DIMM_HOST_PRD,
		    &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len);

	/* force host FIFO dump */
	writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);

	readl(dimm_mmio);	/* MMIO PCI posting flush */

	VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len);
}
522
/*
 * Prepare a non-data command: build only the ATA packet (no S/G tables,
 * no HDMA packet) and copy the header area to the DIMM.
 */
static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct pdc_port_priv *pp = ap->private_data;
	void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
	unsigned int portno = ap->port_no;
	unsigned int i;

	VPRINTK("ata%u: ENTER\n", ap->print_id);

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno);

	if (qc->tf.flags & ATA_TFLAG_LBA48)
		i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i);
	else
		i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i);

	pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i);

	/* copy three S/G tables and two packets to DIMM MMIO window */
	memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP),
		    &pp->dimm_buf, PDC_DIMM_HEADER_SZ);

	/* force host FIFO dump */
	writel(0x00000001, mmio + PDC_20621_GENERAL_CTL);

	readl(dimm_mmio);	/* MMIO PCI posting flush */

	VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
}
557
558 static void pdc20621_qc_prep(struct ata_queued_cmd *qc)
559 {
560 switch (qc->tf.protocol) {
561 case ATA_PROT_DMA:
562 pdc20621_dma_prep(qc);
563 break;
564 case ATA_PROT_NODATA:
565 pdc20621_nodata_prep(qc);
566 break;
567 default:
568 break;
569 }
570 }
571
/*
 * Submit an HDMA packet to the hardware immediately: arm the SEQ slot,
 * then write the packet's DIMM offset to the HDMA submit register.
 * Each write is followed by a readback to flush PCI posting; the
 * ordering (SEQCTL before PKT_SUBMIT) must be preserved.
 */
static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
				 unsigned int seq,
				 u32 pkt_ofs)
{
	struct ata_port *ap = qc->ap;
	struct ata_host *host = ap->host;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
	readl(mmio + PDC_20621_SEQCTL + (seq * 4));	/* flush */

	writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT);
	readl(mmio + PDC_HDMA_PKT_SUBMIT);	/* flush */
}
589
590 static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
591 unsigned int seq,
592 u32 pkt_ofs)
593 {
594 struct ata_port *ap = qc->ap;
595 struct pdc_host_priv *pp = ap->host->private_data;
596 unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;
597
598 if (!pp->doing_hdma) {
599 __pdc20621_push_hdma(qc, seq, pkt_ofs);
600 pp->doing_hdma = 1;
601 return;
602 }
603
604 pp->hdma[idx].qc = qc;
605 pp->hdma[idx].seq = seq;
606 pp->hdma[idx].pkt_ofs = pkt_ofs;
607 pp->hdma_prod++;
608 }
609
610 static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
611 {
612 struct ata_port *ap = qc->ap;
613 struct pdc_host_priv *pp = ap->host->private_data;
614 unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;
615
616 /* if nothing on queue, we're done */
617 if (pp->hdma_prod == pp->hdma_cons) {
618 pp->doing_hdma = 0;
619 return;
620 }
621
622 __pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq,
623 pp->hdma[idx].pkt_ofs);
624 pp->hdma_cons++;
625 }
626
#ifdef ATA_VERBOSE_DEBUG
/* Debug aid: dump the four dwords of the port's HDMA packet from DIMM MMIO. */
static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int port_no = ap->port_no;
	void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];

	dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
	dimm_mmio += PDC_DIMM_HOST_PKT;

	printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio));
	printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4));
	printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8));
	printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12));
}
#else
/* no-op when verbose debugging is disabled */
static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
#endif /* ATA_VERBOSE_DEBUG */
645
/*
 * Kick off a prepared command.  For DMA writes, step one is an HDMA
 * copy from host memory to the DIMM (SEQ id = port + 5); the ATA packet
 * is submitted later from the interrupt handler.  For everything else,
 * submit the ATA packet directly (SEQ id = port + 1).
 */
static void pdc20621_packet_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_host *host = ap->host;
	unsigned int port_no = ap->port_no;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 seq = (u8) (port_no + 1);
	unsigned int port_ofs;

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	VPRINTK("ata%u: ENTER\n", ap->print_id);

	wmb();			/* flush PRD, pkt writes */

	/* base DIMM offset of this port's packet/header window */
	port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);

	/* if writing, we (1) DMA to DIMM, then (2) do ATA command */
	if (rw && qc->tf.protocol == ATA_PROT_DMA) {
		seq += 4;	/* HDMA SEQ ids live 4 above the ATA ones */

		pdc20621_dump_hdma(qc);
		pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
		VPRINTK("queued ofs 0x%x (%u), seq %u\n",
			port_ofs + PDC_DIMM_HOST_PKT,
			port_ofs + PDC_DIMM_HOST_PKT,
			seq);
	} else {
		/* arm the SEQ slot, then submit the ATA packet */
		writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
		readl(mmio + PDC_20621_SEQCTL + (seq * 4));	/* flush */

		writel(port_ofs + PDC_DIMM_ATA_PKT,
		       ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
		readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
		VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
			port_ofs + PDC_DIMM_ATA_PKT,
			port_ofs + PDC_DIMM_ATA_PKT,
			seq);
	}
}
688
/*
 * qc_issue callback: DMA commands and interrupt-driven non-data commands
 * go through the packet engine; polled non-data and anything else falls
 * back to the standard SFF issue path.  ATAPI DMA is rejected up front
 * by pdc_check_atapi_dma(), so reaching that case here is a bug.
 */
static unsigned int pdc20621_qc_issue(struct ata_queued_cmd *qc)
{
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		/* polled non-data commands use the SFF path below */
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			break;
		/*FALLTHROUGH*/
	case ATA_PROT_DMA:
		pdc20621_packet_start(qc);
		return 0;

	case ATAPI_PROT_DMA:
		BUG();
		break;

	default:
		break;
	}

	return ata_sff_qc_issue(qc);
}
710
/*
 * Per-port interrupt service.  Each DMA command is a two-step state
 * machine (see "Theory of operation" at the top of the file); which step
 * just finished is encoded in @doing_hdma (true when the interrupt came
 * from an HDMA SEQ id).  Returns 1 if the interrupt was consumed.
 * Called under host->lock from pdc20621_interrupt().
 */
static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
					      struct ata_queued_cmd *qc,
					      unsigned int doing_hdma,
					      void __iomem *mmio)
{
	unsigned int port_no = ap->port_no;
	unsigned int port_ofs =
		PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
	u8 status;
	unsigned int handled = 0;

	VPRINTK("ENTER\n");

	if ((qc->tf.protocol == ATA_PROT_DMA) &&	/* read */
	    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {

		/* step two - DMA from DIMM to host */
		if (doing_hdma) {
			VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
			/* get drive status; clear intr; complete txn */
			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
			ata_qc_complete(qc);
			pdc20621_pop_hdma(qc);
		}

		/* step one - exec ATA command */
		else {
			u8 seq = (u8) (port_no + 1 + 4);	/* HDMA SEQ id */
			VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));

			/* submit hdma pkt */
			pdc20621_dump_hdma(qc);
			pdc20621_push_hdma(qc, seq,
					   port_ofs + PDC_DIMM_HOST_PKT);
		}
		handled = 1;

	} else if (qc->tf.protocol == ATA_PROT_DMA) {	/* write */

		/* step one - DMA from host to DIMM */
		if (doing_hdma) {
			u8 seq = (u8) (port_no + 1);	/* ATA SEQ id */
			VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));

			/* submit ata pkt */
			writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
			readl(mmio + PDC_20621_SEQCTL + (seq * 4));
			writel(port_ofs + PDC_DIMM_ATA_PKT,
			       ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
			readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
		}

		/* step two - execute ATA command */
		else {
			VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id,
				readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
			/* get drive status; clear intr; complete txn */
			qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
			ata_qc_complete(qc);
			pdc20621_pop_hdma(qc);
		}
		handled = 1;

	/* command completion, but no data xfer */
	} else if (qc->tf.protocol == ATA_PROT_NODATA) {

		status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
		DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
		qc->err_mask |= ac_err_mask(status);
		ata_qc_complete(qc);
		handled = 1;

	} else {
		ap->stats.idle_irq++;
	}

	return handled;
}
792
/* sff_irq_clear callback: reading the status register clears the port IRQ. */
static void pdc20621_irq_clear(struct ata_port *ap)
{
	ioread8(ap->ioaddr.status_addr);
}
797
/*
 * Top-level interrupt handler.  Reads the SEQ interrupt mask (which also
 * clears it) and dispatches each asserted SEQ id to the owning port:
 * ids 1-4 are ATA completions for ports 0-3, ids 5-8 are HDMA
 * completions for the same ports (hence the i > 4 / port_no -= 4 logic).
 */
static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ata_port *ap;
	u32 mask = 0;
	unsigned int i, tmp, port_no;
	unsigned int handled = 0;
	void __iomem *mmio_base;

	VPRINTK("ENTER\n");

	if (!host || !host->iomap[PDC_MMIO_BAR]) {
		VPRINTK("QUICK EXIT\n");
		return IRQ_NONE;
	}

	mmio_base = host->iomap[PDC_MMIO_BAR];

	/* reading should also clear interrupts */
	mmio_base += PDC_CHIP0_OFS;
	mask = readl(mmio_base + PDC_20621_SEQMASK);
	VPRINTK("mask == 0x%x\n", mask);

	/* all-ones likely means the device is gone (e.g. hot-unplug) */
	if (mask == 0xffffffff) {
		VPRINTK("QUICK EXIT 2\n");
		return IRQ_NONE;
	}
	mask &= 0xffff;		/* only 16 tags possible */
	if (!mask) {
		VPRINTK("QUICK EXIT 3\n");
		return IRQ_NONE;
	}

	spin_lock(&host->lock);

	for (i = 1; i < 9; i++) {
		/* SEQ ids 1-4 -> ports 0-3 (ATA); 5-8 -> ports 0-3 (HDMA) */
		port_no = i - 1;
		if (port_no > 3)
			port_no -= 4;
		if (port_no >= host->n_ports)
			ap = NULL;
		else
			ap = host->ports[port_no];
		tmp = mask & (1 << i);
		VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
		if (tmp && ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += pdc20621_host_intr(ap, qc, (i > 4),
							      mmio_base);
		}
	}

	spin_unlock(&host->lock);

	VPRINTK("mask == 0x%x\n", mask);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}
862
/* EH freeze callback: mask the port interrupt and stop its DMA engine. */
static void pdc_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr;
	u32 tmp;

	/* FIXME: if all 4 ATA engines are stopped, also stop HDMA engine */

	tmp = readl(mmio + PDC_CTLSTAT);
	tmp |= PDC_MASK_INT;
	tmp &= ~PDC_DMA_ENABLE;
	writel(tmp, mmio + PDC_CTLSTAT);
	readl(mmio + PDC_CTLSTAT);	/* flush */
}
876
/* EH thaw callback: clear any pending IRQ and re-enable port interrupts. */
static void pdc_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr;
	u32 tmp;

	/* FIXME: start HDMA engine, if zero ATA engines running */

	/* clear IRQ */
	ioread8(ap->ioaddr.status_addr);

	/* turn IRQ back on */
	tmp = readl(mmio + PDC_CTLSTAT);
	tmp &= ~PDC_MASK_INT;
	writel(tmp, mmio + PDC_CTLSTAT);
	readl(mmio + PDC_CTLSTAT);	/* flush */
}
893
/*
 * Pulse the port's RESET bit in CTLSTAT: try up to 11 times to observe
 * the bit set after writing it, then clear it again.
 * NOTE(review): the loop reads BEFORE writing, so the first iteration's
 * readback is of the pre-reset state and the final write uses the value
 * read on the last iteration — appears intentional (matches the original
 * driver) but confirm against hardware docs before touching.
 */
static void pdc_reset_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
	unsigned int i;
	u32 tmp;

	/* FIXME: handle HDMA copy engine */

	for (i = 11; i > 0; i--) {
		tmp = readl(mmio);
		if (tmp & PDC_RESET)
			break;

		udelay(100);

		tmp |= PDC_RESET;
		writel(tmp, mmio);
	}

	/* release the reset */
	tmp &= ~PDC_RESET;
	writel(tmp, mmio);
	readl(mmio);	/* flush */
}
917
918 static int pdc_softreset(struct ata_link *link, unsigned int *class,
919 unsigned long deadline)
920 {
921 pdc_reset_port(link->ap);
922 return ata_sff_softreset(link, class, deadline);
923 }
924
925 static void pdc_error_handler(struct ata_port *ap)
926 {
927 if (!(ap->pflags & ATA_PFLAG_FROZEN))
928 pdc_reset_port(ap);
929
930 ata_std_error_handler(ap);
931 }
932
933 static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
934 {
935 struct ata_port *ap = qc->ap;
936
937 /* make DMA engine forget about the failed command */
938 if (qc->flags & ATA_QCFLAG_FAILED)
939 pdc_reset_port(ap);
940 }
941
942 static int pdc_check_atapi_dma(struct ata_queued_cmd *qc)
943 {
944 u8 *scsicmd = qc->scsicmd->cmnd;
945 int pio = 1; /* atapi dma off by default */
946
947 /* Whitelist commands that may use DMA. */
948 switch (scsicmd[0]) {
949 case WRITE_12:
950 case WRITE_10:
951 case WRITE_6:
952 case READ_12:
953 case READ_10:
954 case READ_6:
955 case 0xad: /* READ_DVD_STRUCTURE */
956 case 0xbe: /* READ_CD */
957 pio = 0;
958 }
959 /* -45150 (FFFF4FA2) to -1 (FFFFFFFF) shall use PIO mode */
960 if (scsicmd[0] == WRITE_10) {
961 unsigned int lba =
962 (scsicmd[2] << 24) |
963 (scsicmd[3] << 16) |
964 (scsicmd[4] << 8) |
965 scsicmd[5];
966 if (lba >= 0xFFFF4FA2)
967 pio = 1;
968 }
969 return pio;
970 }
971
/*
 * sff_tf_load wrapper: DMA protocols must go through the packet engine,
 * never the SFF taskfile path, so assert before delegating.
 */
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	WARN_ON(tf->protocol == ATA_PROT_DMA ||
		tf->protocol == ATAPI_PROT_DMA);
	ata_sff_tf_load(ap, tf);
}
978
979
/*
 * sff_exec_command wrapper: same invariant as pdc_tf_load_mmio —
 * DMA protocols never use the SFF command path.
 */
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	WARN_ON(tf->protocol == ATA_PROT_DMA ||
		tf->protocol == ATAPI_PROT_DMA);
	ata_sff_exec_command(ap, tf);
}
986
987
988 static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
989 {
990 port->cmd_addr = base;
991 port->data_addr = base;
992 port->feature_addr =
993 port->error_addr = base + 0x4;
994 port->nsect_addr = base + 0x8;
995 port->lbal_addr = base + 0xc;
996 port->lbam_addr = base + 0x10;
997 port->lbah_addr = base + 0x14;
998 port->device_addr = base + 0x18;
999 port->command_addr =
1000 port->status_addr = base + 0x1c;
1001 port->altstatus_addr =
1002 port->ctl_addr = base + 0x38;
1003 }
1004
1005
#ifdef ATA_VERBOSE_DEBUG
/*
 * Copy @size bytes from DIMM memory at @offset into @psource, paging the
 * 32K DIMM window through PDC_DIMM_WINDOW_CTLR as needed: first the
 * partial chunk up to the window boundary, then whole windows, then the
 * remainder.
 * NOTE(review): memcpy_fromio is called with length "dist"/"size / 4"
 * inconsistently across the three chunks, and the first chunk offsets
 * the MMIO pointer by offset / 4 — looks suspicious, but this is
 * debug-only code; confirm against the put_to_dimm variant before use.
 */
static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
				   u32 offset, u32 size)
{
	u32 window_size;
	u16 idx;
	u8 page_mask;
	long dist;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	page_mask = 0x00;
	window_size = 0x2000 * 4;	/* 32K byte uchar size */
	idx = (u16) (offset / window_size);

	writel(0x01, mmio + PDC_GENERAL_CTLR);
	readl(mmio + PDC_GENERAL_CTLR);
	/* select the DIMM window page containing @offset */
	writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
	readl(mmio + PDC_DIMM_WINDOW_CTLR);

	offset -= (idx * window_size);
	idx++;
	/* bytes available in this window, capped at @size */
	dist = ((long) (window_size - (offset + size))) >= 0 ? size :
		(long) (window_size - offset);
	memcpy_fromio((char *) psource, (char *) (dimm_mmio + offset / 4),
		      dist);

	psource += dist;
	size -= dist;
	/* copy whole windows */
	for (; (long) size >= (long) window_size ;) {
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_fromio((char *) psource, (char *) (dimm_mmio),
			      window_size / 4);
		psource += window_size;
		size -= window_size;
		idx++;
	}

	/* copy the trailing partial window, if any */
	if (size) {
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_fromio((char *) psource, (char *) (dimm_mmio),
			      size / 4);
	}
}
#endif
1060
1061
/*
 * Copy @size bytes from @psource into DIMM memory at @offset, paging the
 * 32K DIMM window through PDC_DIMM_WINDOW_CTLR: partial head chunk,
 * whole windows, then the tail.  Each chunk is followed by a write to
 * PDC_GENERAL_CTLR (FIFO dump, as in pdc20621_dma_prep).
 * NOTE(review): the head chunk writes to dimm_mmio + offset / 4 while
 * later chunks use window_size / 4 and size / 4 byte counts — mirrors
 * the original driver; verify the /4 arithmetic against hardware docs
 * before modifying.
 */
static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
				 u32 offset, u32 size)
{
	u32 window_size;
	u16 idx;
	u8 page_mask;
	long dist;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	void __iomem *dimm_mmio = host->iomap[PDC_DIMM_BAR];

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	page_mask = 0x00;
	window_size = 0x2000 * 4;	/* 32K byte uchar size */
	idx = (u16) (offset / window_size);

	/* select the DIMM window page containing @offset */
	writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
	readl(mmio + PDC_DIMM_WINDOW_CTLR);
	offset -= (idx * window_size);
	idx++;
	/* bytes that fit in this window, capped at @size */
	dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
		(long) (window_size - offset);
	memcpy_toio(dimm_mmio + offset / 4, psource, dist);
	writel(0x01, mmio + PDC_GENERAL_CTLR);
	readl(mmio + PDC_GENERAL_CTLR);

	psource += dist;
	size -= dist;
	/* copy whole windows */
	for (; (long) size >= (long) window_size ;) {
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_toio(dimm_mmio, psource, window_size / 4);
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
		psource += window_size;
		size -= window_size;
		idx++;
	}

	/* copy the trailing partial window, if any */
	if (size) {
		writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR);
		readl(mmio + PDC_DIMM_WINDOW_CTLR);
		memcpy_toio(dimm_mmio, psource, size / 4);
		writel(0x01, mmio + PDC_GENERAL_CTLR);
		readl(mmio + PDC_GENERAL_CTLR);
	}
}
1110
1111
1112 static unsigned int pdc20621_i2c_read(struct ata_host *host, u32 device,
1113 u32 subaddr, u32 *pdata)
1114 {
1115 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1116 u32 i2creg = 0;
1117 u32 status;
1118 u32 count = 0;
1119
1120 /* hard-code chip #0 */
1121 mmio += PDC_CHIP0_OFS;
1122
1123 i2creg |= device << 24;
1124 i2creg |= subaddr << 16;
1125
1126 /* Set the device and subaddress */
1127 writel(i2creg, mmio + PDC_I2C_ADDR_DATA);
1128 readl(mmio + PDC_I2C_ADDR_DATA);
1129
1130 /* Write Control to perform read operation, mask int */
1131 writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT,
1132 mmio + PDC_I2C_CONTROL);
1133
1134 for (count = 0; count <= 1000; count ++) {
1135 status = readl(mmio + PDC_I2C_CONTROL);
1136 if (status & PDC_I2C_COMPLETE) {
1137 status = readl(mmio + PDC_I2C_ADDR_DATA);
1138 break;
1139 } else if (count == 1000)
1140 return 0;
1141 }
1142
1143 *pdata = (status >> 8) & 0x000000ff;
1144 return 1;
1145 }
1146
1147
1148 static int pdc20621_detect_dimm(struct ata_host *host)
1149 {
1150 u32 data = 0;
1151 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1152 PDC_DIMM_SPD_SYSTEM_FREQ, &data)) {
1153 if (data == 100)
1154 return 100;
1155 } else
1156 return 0;
1157
1158 if (pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) {
1159 if (data <= 0x75)
1160 return 133;
1161 } else
1162 return 0;
1163
1164 return 0;
1165 }
1166
1167
/*
 * pdc20621_prog_dimm0 - program the DIMM0 Module Control Register
 * @host: ATA host owning the PDC20621
 *
 * Reads the relevant SPD bytes from the DIMM over I2C, packs the timing
 * and geometry fields into the DIMM0 control word, writes it out, and
 * returns the computed module size in megabytes.
 *
 * NOTE(review): the return value of pdc20621_i2c_read() is ignored, so a
 * failed SPD read would leave the corresponding spd0[] slot
 * uninitialized -- presumably tolerated because detect_dimm already
 * succeeded, but worth confirming.
 */
static int pdc20621_prog_dimm0(struct ata_host *host)
{
	u32 spd0[50];		/* SPD bytes, indexed by SPD byte offset */
	u32 data = 0;
	int size, i;
	u8 bdimmsize;
	void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
	/* Map of SPD register selectors to the SPD byte offset they fill. */
	static const struct {
		unsigned int reg;
		unsigned int ofs;
	} pdc_i2c_read_data [] = {
		{ PDC_DIMM_SPD_TYPE, 11 },
		{ PDC_DIMM_SPD_FRESH_RATE, 12 },
		{ PDC_DIMM_SPD_COLUMN_NUM, 4 },
		{ PDC_DIMM_SPD_ATTRIBUTE, 21 },
		{ PDC_DIMM_SPD_ROW_NUM, 3 },
		{ PDC_DIMM_SPD_BANK_NUM, 17 },
		{ PDC_DIMM_SPD_MODULE_ROW, 5 },
		{ PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 },
		{ PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 },
		{ PDC_DIMM_SPD_RAS_CAS_DELAY, 29 },
		{ PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 },
		{ PDC_DIMM_SPD_CAS_LATENCY, 18 },
	};

	/* hard-code chip #0 */
	mmio += PDC_CHIP0_OFS;

	/* Pull each needed SPD byte into spd0[] at its SPD offset. */
	for (i = 0; i < ARRAY_SIZE(pdc_i2c_read_data); i++)
		pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
				  pdc_i2c_read_data[i].reg,
				  &spd0[pdc_i2c_read_data[i].ofs]);

	/* Geometry: column count (ofs 4), attribute flag (21), row count (3). */
	data |= (spd0[4] - 8) | ((spd0[21] != 0) << 3) | ((spd0[3]-11) << 4);
	/* Banks (17), module rows (5), row precharge (27, rounded up). */
	data |= ((spd0[17] / 4) << 6) | ((spd0[5] / 2) << 7) |
		((((spd0[27] + 9) / 10) - 1) << 8) ;
	/* Worst of RAS/CAS delay (29) and row-active delay (28). */
	data |= (((((spd0[29] > spd0[28])
		    ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10;
	data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12;

	/* CAS latency field (bits 15:14): highest latency bit advertised. */
	if (spd0[18] & 0x08)
		data |= ((0x03) << 14);
	else if (spd0[18] & 0x04)
		data |= ((0x02) << 14);
	else if (spd0[18] & 0x01)
		data |= ((0x01) << 14);
	else
		data |= (0 << 14);	/* explicit no-op, documents the default */

	/*
	   Calculate the size of bDIMMSize (power of 2) and
	   merge the DIMM size by program start/end address.
	*/

	bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3;
	size = (1 << bdimmsize) >> 20;	/* size = xxx(MB) */
	data |= (((size / 16) - 1) << 16);
	data |= (0 << 23);	/* explicit no-op, keeps the layout visible */
	data |= 8;
	writel(data, mmio + PDC_DIMM0_CONTROL);
	readl(mmio + PDC_DIMM0_CONTROL);	/* flush posted write */
	return size;
}
1231
1232
1233 static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
1234 {
1235 u32 data, spd0;
1236 int error, i;
1237 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1238
1239 /* hard-code chip #0 */
1240 mmio += PDC_CHIP0_OFS;
1241
1242 /*
1243 Set To Default : DIMM Module Global Control Register (0x022259F1)
1244 DIMM Arbitration Disable (bit 20)
1245 DIMM Data/Control Output Driving Selection (bit12 - bit15)
1246 Refresh Enable (bit 17)
1247 */
1248
1249 data = 0x022259F1;
1250 writel(data, mmio + PDC_SDRAM_CONTROL);
1251 readl(mmio + PDC_SDRAM_CONTROL);
1252
1253 /* Turn on for ECC */
1254 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1255 PDC_DIMM_SPD_TYPE, &spd0);
1256 if (spd0 == 0x02) {
1257 data |= (0x01 << 16);
1258 writel(data, mmio + PDC_SDRAM_CONTROL);
1259 readl(mmio + PDC_SDRAM_CONTROL);
1260 printk(KERN_ERR "Local DIMM ECC Enabled\n");
1261 }
1262
1263 /* DIMM Initialization Select/Enable (bit 18/19) */
1264 data &= (~(1<<18));
1265 data |= (1<<19);
1266 writel(data, mmio + PDC_SDRAM_CONTROL);
1267
1268 error = 1;
1269 for (i = 1; i <= 10; i++) { /* polling ~5 secs */
1270 data = readl(mmio + PDC_SDRAM_CONTROL);
1271 if (!(data & (1<<19))) {
1272 error = 0;
1273 break;
1274 }
1275 msleep(i*100);
1276 }
1277 return error;
1278 }
1279
1280
1281 static unsigned int pdc20621_dimm_init(struct ata_host *host)
1282 {
1283 int speed, size, length;
1284 u32 addr, spd0, pci_status;
1285 u32 time_period = 0;
1286 u32 tcount = 0;
1287 u32 ticks = 0;
1288 u32 clock = 0;
1289 u32 fparam = 0;
1290 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1291
1292 /* hard-code chip #0 */
1293 mmio += PDC_CHIP0_OFS;
1294
1295 /* Initialize PLL based upon PCI Bus Frequency */
1296
1297 /* Initialize Time Period Register */
1298 writel(0xffffffff, mmio + PDC_TIME_PERIOD);
1299 time_period = readl(mmio + PDC_TIME_PERIOD);
1300 VPRINTK("Time Period Register (0x40): 0x%x\n", time_period);
1301
1302 /* Enable timer */
1303 writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
1304 readl(mmio + PDC_TIME_CONTROL);
1305
1306 /* Wait 3 seconds */
1307 msleep(3000);
1308
1309 /*
1310 When timer is enabled, counter is decreased every internal
1311 clock cycle.
1312 */
1313
1314 tcount = readl(mmio + PDC_TIME_COUNTER);
1315 VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);
1316
1317 /*
1318 If SX4 is on PCI-X bus, after 3 seconds, the timer counter
1319 register should be >= (0xffffffff - 3x10^8).
1320 */
1321 if (tcount >= PCI_X_TCOUNT) {
1322 ticks = (time_period - tcount);
1323 VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);
1324
1325 clock = (ticks / 300000);
1326 VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);
1327
1328 clock = (clock * 33);
1329 VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);
1330
1331 /* PLL F Param (bit 22:16) */
1332 fparam = (1400000 / clock) - 2;
1333 VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);
1334
1335 /* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
1336 pci_status = (0x8a001824 | (fparam << 16));
1337 } else
1338 pci_status = PCI_PLL_INIT;
1339
1340 /* Initialize PLL. */
1341 VPRINTK("pci_status: 0x%x\n", pci_status);
1342 writel(pci_status, mmio + PDC_CTL_STATUS);
1343 readl(mmio + PDC_CTL_STATUS);
1344
1345 /*
1346 Read SPD of DIMM by I2C interface,
1347 and program the DIMM Module Controller.
1348 */
1349 if (!(speed = pdc20621_detect_dimm(host))) {
1350 printk(KERN_ERR "Detect Local DIMM Fail\n");
1351 return 1; /* DIMM error */
1352 }
1353 VPRINTK("Local DIMM Speed = %d\n", speed);
1354
1355 /* Programming DIMM0 Module Control Register (index_CID0:80h) */
1356 size = pdc20621_prog_dimm0(host);
1357 VPRINTK("Local DIMM Size = %dMB\n", size);
1358
1359 /* Programming DIMM Module Global Control Register (index_CID0:88h) */
1360 if (pdc20621_prog_dimm_global(host)) {
1361 printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
1362 return 1;
1363 }
1364
1365 #ifdef ATA_VERBOSE_DEBUG
1366 {
1367 u8 test_parttern1[40] =
1368 {0x55,0xAA,'P','r','o','m','i','s','e',' ',
1369 'N','o','t',' ','Y','e','t',' ',
1370 'D','e','f','i','n','e','d',' ',
1371 '1','.','1','0',
1372 '9','8','0','3','1','6','1','2',0,0};
1373 u8 test_parttern2[40] = {0};
1374
1375 pdc20621_put_to_dimm(host, test_parttern2, 0x10040, 40);
1376 pdc20621_put_to_dimm(host, test_parttern2, 0x40, 40);
1377
1378 pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40);
1379 pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
1380 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1381 test_parttern2[1], &(test_parttern2[2]));
1382 pdc20621_get_from_dimm(host, test_parttern2, 0x10040,
1383 40);
1384 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1385 test_parttern2[1], &(test_parttern2[2]));
1386
1387 pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40);
1388 pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
1389 printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
1390 test_parttern2[1], &(test_parttern2[2]));
1391 }
1392 #endif
1393
1394 /* ECC initiliazation. */
1395
1396 pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
1397 PDC_DIMM_SPD_TYPE, &spd0);
1398 if (spd0 == 0x02) {
1399 void *buf;
1400 VPRINTK("Start ECC initialization\n");
1401 addr = 0;
1402 length = size * 1024 * 1024;
1403 buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL);
1404 while (addr < length) {
1405 pdc20621_put_to_dimm(host, buf, addr,
1406 ECC_ERASE_BUF_SZ);
1407 addr += ECC_ERASE_BUF_SZ;
1408 }
1409 kfree(buf);
1410 VPRINTK("Finish ECC initialization\n");
1411 }
1412 return 0;
1413 }
1414
1415
1416 static void pdc_20621_init(struct ata_host *host)
1417 {
1418 u32 tmp;
1419 void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
1420
1421 /* hard-code chip #0 */
1422 mmio += PDC_CHIP0_OFS;
1423
1424 /*
1425 * Select page 0x40 for our 32k DIMM window
1426 */
1427 tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000;
1428 tmp |= PDC_PAGE_WINDOW; /* page 40h; arbitrarily selected */
1429 writel(tmp, mmio + PDC_20621_DIMM_WINDOW);
1430
1431 /*
1432 * Reset Host DMA
1433 */
1434 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1435 tmp |= PDC_RESET;
1436 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1437 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1438
1439 udelay(10);
1440
1441 tmp = readl(mmio + PDC_HDMA_CTLSTAT);
1442 tmp &= ~PDC_RESET;
1443 writel(tmp, mmio + PDC_HDMA_CTLSTAT);
1444 readl(mmio + PDC_HDMA_CTLSTAT); /* flush */
1445 }
1446
/*
 * pdc_sata_init_one - PCI probe for a PDC20621 (SX4) board
 * @pdev: PCI device being probed
 * @ent: matching entry in pdc_sata_pci_tbl
 *
 * Allocates a 4-port ATA host, maps the MMIO and DIMM BARs, sets up
 * each port's register addresses, initializes the on-board DIMM and
 * chip, then activates the host.  Resources are device-managed
 * (pcim_*/devm_*), so error paths may simply return.
 */
static int pdc_sata_init_one(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	static int printed_version;
	const struct ata_port_info *ppi[] =
		{ &pdc_port_info[ent->driver_data], NULL };
	struct ata_host *host;
	struct pdc_host_priv *hpriv;
	int i, rc;

	/* Print the driver version once, on first probe. */
	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;

	host->private_data = hpriv;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
				DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);	/* BAR busy: keep device enabled */
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	/* Ports live at a 0x80-byte stride starting at chip offset 0x200. */
	for (i = 0; i < 4; i++) {
		struct ata_port *ap = host->ports[i];
		void __iomem *base = host->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;
		unsigned int offset = 0x200 + i * 0x80;

		pdc_sata_setup_port(&ap->ioaddr, base + offset);

		ata_port_pbar_desc(ap, PDC_MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, PDC_DIMM_BAR, -1, "dimm");
		ata_port_pbar_desc(ap, PDC_MMIO_BAR, offset, "port");
	}

	/* configure and activate */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		return rc;

	/* The DIMM must be working before the chip can move any data. */
	if (pdc20621_dimm_init(host))
		return -ENOMEM;
	pdc_20621_init(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, pdc20621_interrupt,
				 IRQF_SHARED, &pdc_sata_sht);
}
1509
1510
/* Module entry point: register the driver with the PCI core. */
static int __init pdc_sata_init(void)
{
	return pci_register_driver(&pdc_sata_pci_driver);
}
1515
1516
/* Module exit point: unregister the driver; devm/pcim cleanup follows. */
static void __exit pdc_sata_exit(void)
{
	pci_unregister_driver(&pdc_sata_pci_driver);
}
1521
1522
/* Module metadata consumed by modinfo and the module loader. */
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Promise SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* Standard module entry/exit wiring. */
module_init(pdc_sata_init);
module_exit(pdc_sata_exit);