1 #include <linux/kernel.h>
2 #include <linux/module.h>
3 #include <linux/init.h>
4 #include <linux/blkdev.h>
5 #include <scsi/scsi_host.h>
7 #include <linux/libata.h>
10 #include <asm/ecard.h>
12 #define DRV_NAME "pata_icside"
14 #define ICS_IDENT_OFFSET 0x2280
16 #define ICS_ARCIN_V5_INTRSTAT 0x0000
17 #define ICS_ARCIN_V5_INTROFFSET 0x0004
19 #define ICS_ARCIN_V6_INTROFFSET_1 0x2200
20 #define ICS_ARCIN_V6_INTRSTAT_1 0x2290
21 #define ICS_ARCIN_V6_INTROFFSET_2 0x3200
22 #define ICS_ARCIN_V6_INTRSTAT_2 0x3290
/*
 * Layout of one IDE taskfile within the card's memory space: where the
 * command-register block and the control register live, and how far apart
 * consecutive ATA registers are spaced (as a left-shift count).
 */
struct portinfo {
	unsigned int dataoffset;	/* offset of the command/data register block */
	unsigned int ctrloffset;	/* offset of the control/altstatus register */
	unsigned int stepping;		/* log2 byte spacing between registers */
};
30 static const struct portinfo pata_icside_portinfo_v5
= {
36 static const struct portinfo pata_icside_portinfo_v6_1
= {
42 static const struct portinfo pata_icside_portinfo_v6_2
= {
48 #define PATA_ICSIDE_MAX_SG 128
50 struct pata_icside_state
{
51 void __iomem
*irq_port
;
52 void __iomem
*ioc_base
;
58 unsigned int speed
[ATA_MAX_DEVICES
];
60 struct scatterlist sg
[PATA_ICSIDE_MAX_SG
];
/* Interface types read from the card ident region */
#define ICS_TYPE_A3IN	0
#define ICS_TYPE_A3USER	1
#define ICS_TYPE_V6	3
#define ICS_TYPE_V5	15
#define ICS_TYPE_NOTYPE	((unsigned int)-1)
69 /* ---------------- Version 5 PCB Support Functions --------------------- */
70 /* Prototype: pata_icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
71 * Purpose : enable interrupts from card
73 static void pata_icside_irqenable_arcin_v5 (struct expansion_card
*ec
, int irqnr
)
75 struct pata_icside_state
*state
= ec
->irq_data
;
77 writeb(0, state
->irq_port
+ ICS_ARCIN_V5_INTROFFSET
);
80 /* Prototype: pata_icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
81 * Purpose : disable interrupts from card
83 static void pata_icside_irqdisable_arcin_v5 (struct expansion_card
*ec
, int irqnr
)
85 struct pata_icside_state
*state
= ec
->irq_data
;
87 readb(state
->irq_port
+ ICS_ARCIN_V5_INTROFFSET
);
90 static const expansioncard_ops_t pata_icside_ops_arcin_v5
= {
91 .irqenable
= pata_icside_irqenable_arcin_v5
,
92 .irqdisable
= pata_icside_irqdisable_arcin_v5
,
96 /* ---------------- Version 6 PCB Support Functions --------------------- */
97 /* Prototype: pata_icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
98 * Purpose : enable interrupts from card
100 static void pata_icside_irqenable_arcin_v6 (struct expansion_card
*ec
, int irqnr
)
102 struct pata_icside_state
*state
= ec
->irq_data
;
103 void __iomem
*base
= state
->irq_port
;
105 if (!state
->port
[0].disabled
)
106 writeb(0, base
+ ICS_ARCIN_V6_INTROFFSET_1
);
107 if (!state
->port
[1].disabled
)
108 writeb(0, base
+ ICS_ARCIN_V6_INTROFFSET_2
);
111 /* Prototype: pata_icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
112 * Purpose : disable interrupts from card
114 static void pata_icside_irqdisable_arcin_v6 (struct expansion_card
*ec
, int irqnr
)
116 struct pata_icside_state
*state
= ec
->irq_data
;
118 readb(state
->irq_port
+ ICS_ARCIN_V6_INTROFFSET_1
);
119 readb(state
->irq_port
+ ICS_ARCIN_V6_INTROFFSET_2
);
122 /* Prototype: pata_icside_irqprobe(struct expansion_card *ec)
123 * Purpose : detect an active interrupt from card
125 static int pata_icside_irqpending_arcin_v6(struct expansion_card
*ec
)
127 struct pata_icside_state
*state
= ec
->irq_data
;
129 return readb(state
->irq_port
+ ICS_ARCIN_V6_INTRSTAT_1
) & 1 ||
130 readb(state
->irq_port
+ ICS_ARCIN_V6_INTRSTAT_2
) & 1;
133 static const expansioncard_ops_t pata_icside_ops_arcin_v6
= {
134 .irqenable
= pata_icside_irqenable_arcin_v6
,
135 .irqdisable
= pata_icside_irqdisable_arcin_v6
,
136 .irqpending
= pata_icside_irqpending_arcin_v6
,
143 * Similar to the BM-DMA, but we use the RiscPCs IOMD DMA controllers.
144 * There is only one DMA controller per card, which means that only
145 * one drive can be accessed at one time. NOTE! We do not enforce that
146 * here, but we rely on the main IDE driver spotting that both
147 * interfaces use the same IRQ, which should guarantee this.
151 * Configure the IOMD to give the appropriate timings for the transfer
152 * mode being requested. We take the advice of the ATA standards, and
153 * calculate the cycle time based on the transfer mode, and the EIDE
154 * MW DMA specs that the drive provides in the IDENTIFY command.
156 * We have the following IOMD DMA modes to choose from:
158 * Type Active Recovery Cycle
159 * A 250 (250) 312 (550) 562 (800)
160 * B 187 (200) 250 (550) 437 (750)
161 * C 125 (125) 125 (375) 250 (500)
162 * D 62 (50) 125 (375) 187 (425)
164 * (figures in brackets are actual measured timings on DIOR/DIOW)
166 * However, we also need to take care of the read/write active and
170 * Mode Active -- Recovery -- Cycle IOMD type
171 * MW0 215 50 215 480 A
175 static void pata_icside_set_dmamode(struct ata_port
*ap
, struct ata_device
*adev
)
177 struct pata_icside_state
*state
= ap
->host
->private_data
;
183 * DMA is based on a 16MHz clock
185 if (ata_timing_compute(adev
, adev
->dma_mode
, &t
, 1000, 1))
189 * Choose the IOMD cycle timing which ensure that the interface
190 * satisfies the measured active, recovery and cycle times.
192 if (t
.active
<= 50 && t
.recover
<= 375 && t
.cycle
<= 425)
193 iomd_type
= 'D', cycle
= 187;
194 else if (t
.active
<= 125 && t
.recover
<= 375 && t
.cycle
<= 500)
195 iomd_type
= 'C', cycle
= 250;
196 else if (t
.active
<= 200 && t
.recover
<= 550 && t
.cycle
<= 750)
197 iomd_type
= 'B', cycle
= 437;
199 iomd_type
= 'A', cycle
= 562;
201 ata_dev_printk(adev
, KERN_INFO
, "timings: act %dns rec %dns cyc %dns (%c)\n",
202 t
.active
, t
.recover
, t
.cycle
, iomd_type
);
204 state
->port
[ap
->port_no
].speed
[adev
->devno
] = cycle
;
207 static void pata_icside_bmdma_setup(struct ata_queued_cmd
*qc
)
209 struct ata_port
*ap
= qc
->ap
;
210 struct pata_icside_state
*state
= ap
->host
->private_data
;
211 struct scatterlist
*sg
, *rsg
= state
->sg
;
212 unsigned int write
= qc
->tf
.flags
& ATA_TFLAG_WRITE
;
215 * We are simplex; BUG if we try to fiddle with DMA
218 BUG_ON(dma_channel_active(state
->dma
));
221 * Copy ATAs scattered sg list into a contiguous array of sg
223 ata_for_each_sg(sg
, qc
) {
224 memcpy(rsg
, sg
, sizeof(*sg
));
229 * Route the DMA signals to the correct interface
231 writeb(state
->port
[ap
->port_no
].port_sel
, state
->ioc_base
);
233 set_dma_speed(state
->dma
, state
->port
[ap
->port_no
].speed
[qc
->dev
->devno
]);
234 set_dma_sg(state
->dma
, state
->sg
, rsg
- state
->sg
);
235 set_dma_mode(state
->dma
, write
? DMA_MODE_WRITE
: DMA_MODE_READ
);
237 /* issue r/w command */
238 ap
->ops
->exec_command(ap
, &qc
->tf
);
241 static void pata_icside_bmdma_start(struct ata_queued_cmd
*qc
)
243 struct ata_port
*ap
= qc
->ap
;
244 struct pata_icside_state
*state
= ap
->host
->private_data
;
246 BUG_ON(dma_channel_active(state
->dma
));
247 enable_dma(state
->dma
);
250 static void pata_icside_bmdma_stop(struct ata_queued_cmd
*qc
)
252 struct ata_port
*ap
= qc
->ap
;
253 struct pata_icside_state
*state
= ap
->host
->private_data
;
255 disable_dma(state
->dma
);
257 /* see ata_bmdma_stop */
261 static u8
pata_icside_bmdma_status(struct ata_port
*ap
)
263 struct pata_icside_state
*state
= ap
->host
->private_data
;
264 void __iomem
*irq_port
;
266 irq_port
= state
->irq_port
+ (ap
->port_no
? ICS_ARCIN_V6_INTRSTAT_2
:
267 ICS_ARCIN_V6_INTRSTAT_1
);
269 return readb(irq_port
) & 1 ? ATA_DMA_INTR
: 0;
272 static int icside_dma_init(struct ata_probe_ent
*ae
, struct expansion_card
*ec
)
274 struct pata_icside_state
*state
= ae
->private_data
;
277 for (i
= 0; i
< ATA_MAX_DEVICES
; i
++) {
278 state
->port
[0].speed
[i
] = 480;
279 state
->port
[1].speed
[i
] = 480;
282 if (ec
->dma
!= NO_DMA
&& !request_dma(ec
->dma
, DRV_NAME
)) {
283 state
->dma
= ec
->dma
;
284 ae
->mwdma_mask
= 0x07; /* MW0..2 */
291 static int pata_icside_port_start(struct ata_port
*ap
)
293 /* No PRD to alloc */
294 return ata_pad_alloc(ap
, ap
->dev
);
297 static struct scsi_host_template pata_icside_sht
= {
298 .module
= THIS_MODULE
,
300 .ioctl
= ata_scsi_ioctl
,
301 .queuecommand
= ata_scsi_queuecmd
,
302 .can_queue
= ATA_DEF_QUEUE
,
303 .this_id
= ATA_SHT_THIS_ID
,
304 .sg_tablesize
= PATA_ICSIDE_MAX_SG
,
305 .cmd_per_lun
= ATA_SHT_CMD_PER_LUN
,
306 .emulated
= ATA_SHT_EMULATED
,
307 .use_clustering
= ATA_SHT_USE_CLUSTERING
,
308 .proc_name
= DRV_NAME
,
309 .dma_boundary
= ~0, /* no dma boundaries */
310 .slave_configure
= ata_scsi_slave_config
,
311 .slave_destroy
= ata_scsi_slave_destroy
,
312 .bios_param
= ata_std_bios_param
,
/* wish this was exported from libata-core */
static void ata_dummy_noret(struct ata_port *port)
{
	/* intentionally empty: used where libata requires a non-NULL hook */
}
321 * We need to shut down unused ports to prevent spurious interrupts.
322 * FIXME: the libata core doesn't call this function for PATA interfaces.
324 static void pata_icside_port_disable(struct ata_port
*ap
)
326 struct pata_icside_state
*state
= ap
->host
->private_data
;
328 ata_port_printk(ap
, KERN_ERR
, "disabling icside port\n");
330 ata_port_disable(ap
);
332 state
->port
[ap
->port_no
].disabled
= 1;
334 if (state
->type
== ICS_TYPE_V6
) {
336 * Disable interrupts from this port, otherwise we
337 * receive spurious interrupts from the floating
340 void __iomem
*irq_port
= state
->irq_port
+
341 (ap
->port_no
? ICS_ARCIN_V6_INTROFFSET_2
: ICS_ARCIN_V6_INTROFFSET_1
);
346 static u8
pata_icside_irq_ack(struct ata_port
*ap
, unsigned int chk_drq
)
348 unsigned int bits
= chk_drq
? ATA_BUSY
| ATA_DRQ
: ATA_BUSY
;
351 status
= ata_busy_wait(ap
, bits
, 1000);
354 printk(KERN_ERR
"abnormal status 0x%X\n", status
);
356 if (ata_msg_intr(ap
))
357 printk(KERN_INFO
"%s: irq ack: drv_stat 0x%X\n",
358 __FUNCTION__
, status
);
363 static struct ata_port_operations pata_icside_port_ops
= {
364 .port_disable
= pata_icside_port_disable
,
366 .set_dmamode
= pata_icside_set_dmamode
,
368 .tf_load
= ata_tf_load
,
369 .tf_read
= ata_tf_read
,
370 .exec_command
= ata_exec_command
,
371 .check_status
= ata_check_status
,
372 .dev_select
= ata_std_dev_select
,
374 .bmdma_setup
= pata_icside_bmdma_setup
,
375 .bmdma_start
= pata_icside_bmdma_start
,
377 .data_xfer
= ata_data_xfer_noirq
,
379 /* no need to build any PRD tables for DMA */
380 .qc_prep
= ata_noop_qc_prep
,
381 .qc_issue
= ata_qc_issue_prot
,
383 .freeze
= ata_bmdma_freeze
,
384 .thaw
= ata_bmdma_thaw
,
385 .error_handler
= ata_bmdma_error_handler
,
386 .post_internal_cmd
= pata_icside_bmdma_stop
,
388 .irq_handler
= ata_interrupt
,
389 .irq_clear
= ata_dummy_noret
,
390 .irq_on
= ata_irq_on
,
391 .irq_ack
= pata_icside_irq_ack
,
393 .port_start
= pata_icside_port_start
,
395 .bmdma_stop
= pata_icside_bmdma_stop
,
396 .bmdma_status
= pata_icside_bmdma_status
,
400 pata_icside_add_port(struct ata_probe_ent
*ae
, void __iomem
*base
,
401 const struct portinfo
*info
)
403 struct ata_ioports
*ioaddr
= &ae
->port
[ae
->n_ports
++];
404 void __iomem
*cmd
= base
+ info
->dataoffset
;
406 ioaddr
->cmd_addr
= cmd
;
407 ioaddr
->data_addr
= cmd
+ (ATA_REG_DATA
<< info
->stepping
);
408 ioaddr
->error_addr
= cmd
+ (ATA_REG_ERR
<< info
->stepping
);
409 ioaddr
->feature_addr
= cmd
+ (ATA_REG_FEATURE
<< info
->stepping
);
410 ioaddr
->nsect_addr
= cmd
+ (ATA_REG_NSECT
<< info
->stepping
);
411 ioaddr
->lbal_addr
= cmd
+ (ATA_REG_LBAL
<< info
->stepping
);
412 ioaddr
->lbam_addr
= cmd
+ (ATA_REG_LBAM
<< info
->stepping
);
413 ioaddr
->lbah_addr
= cmd
+ (ATA_REG_LBAH
<< info
->stepping
);
414 ioaddr
->device_addr
= cmd
+ (ATA_REG_DEVICE
<< info
->stepping
);
415 ioaddr
->status_addr
= cmd
+ (ATA_REG_STATUS
<< info
->stepping
);
416 ioaddr
->command_addr
= cmd
+ (ATA_REG_CMD
<< info
->stepping
);
418 ioaddr
->ctl_addr
= base
+ info
->ctrloffset
;
419 ioaddr
->altstatus_addr
= ioaddr
->ctl_addr
;
423 pata_icside_register_v5(struct ata_probe_ent
*ae
, struct expansion_card
*ec
)
425 struct pata_icside_state
*state
= ae
->private_data
;
428 base
= ecardm_iomap(info
->ec
, ECARD_RES_MEMC
, 0, 0);
432 state
->irq_port
= base
;
434 ec
->irqaddr
= base
+ ICS_ARCIN_V5_INTRSTAT
;
437 ecard_setirq(ec
, &pata_icside_ops_arcin_v5
, state
);
440 * Be on the safe side - disable interrupts
442 ec
->ops
->irqdisable(ec
, ec
->irq
);
444 pata_icside_add_port(ae
, base
, &pata_icside_portinfo_v5
);
450 pata_icside_register_v6(struct ata_probe_ent
*ae
, struct expansion_card
*ec
)
452 struct pata_icside_state
*state
= ae
->private_data
;
453 void __iomem
*ioc_base
, *easi_base
;
454 unsigned int sel
= 0;
456 ioc_base
= ecardm_iomap(ec
, ECARD_RES_IOCFAST
, 0, 0);
460 easi_base
= ioc_base
;
462 if (ecard_resource_flags(ec
, ECARD_RES_EASI
)) {
463 easi_base
= ecardm_iomap(ec
, ECARD_RES_EASI
, 0, 0);
468 * Enable access to the EASI region.
473 writeb(sel
, ioc_base
);
475 ecard_setirq(ec
, &pata_icside_ops_arcin_v6
, state
);
477 state
->irq_port
= easi_base
;
478 state
->ioc_base
= ioc_base
;
479 state
->port
[0].port_sel
= sel
;
480 state
->port
[1].port_sel
= sel
| 1;
483 * Be on the safe side - disable interrupts
485 ec
->ops
->irqdisable(ec
, ec
->irq
);
488 * Find and register the interfaces.
490 pata_icside_add_port(ae
, easi_base
, &pata_icside_portinfo_v6_1
);
491 pata_icside_add_port(ae
, easi_base
, &pata_icside_portinfo_v6_2
);
494 * FIXME: work around libata's aversion to calling port_disable.
495 * This permanently disables interrupts on port 0 - bad luck if
496 * you have a drive on that port.
498 state
->port
[0].disabled
= 1;
500 return icside_dma_init(ae
, ec
);
505 pata_icside_probe(struct expansion_card
*ec
, const struct ecard_id
*id
)
507 struct pata_icside_state
*state
;
508 struct ata_probe_ent ae
;
512 ret
= ecard_request_resources(ec
);
516 state
= kzalloc(sizeof(struct pata_icside_state
), GFP_KERNEL
);
522 state
->type
= ICS_TYPE_NOTYPE
;
525 idmem
= ecardm_iomap(ec
, ECARD_RES_IOCFAST
, 0, 0);
529 type
= readb(idmem
+ ICS_IDENT_OFFSET
) & 1;
530 type
|= (readb(idmem
+ ICS_IDENT_OFFSET
+ 4) & 1) << 1;
531 type
|= (readb(idmem
+ ICS_IDENT_OFFSET
+ 8) & 1) << 2;
532 type
|= (readb(idmem
+ ICS_IDENT_OFFSET
+ 12) & 1) << 3;
533 ecardm_iounmap(ec
, idmem
);
538 memset(&ae
, 0, sizeof(ae
));
539 INIT_LIST_HEAD(&ae
.node
);
541 ae
.port_ops
= &pata_icside_port_ops
;
542 ae
.sht
= &pata_icside_sht
;
545 ae
.port_flags
= ATA_FLAG_SLAVE_POSS
| ATA_FLAG_SRST
;
546 ae
._host_flags
= ATA_HOST_SIMPLEX
;
547 ae
.private_data
= state
;
549 switch (state
->type
) {
551 dev_warn(&ec
->dev
, "A3IN unsupported\n");
555 case ICS_TYPE_A3USER
:
556 dev_warn(&ec
->dev
, "A3USER unsupported\n");
561 ret
= pata_icside_register_v5(&ae
, ec
);
565 ret
= pata_icside_register_v6(&ae
, ec
);
569 dev_warn(&ec
->dev
, "unknown interface type\n");
575 ret
= ata_device_add(&ae
) == 0 ? -ENODEV
: 0;
582 ecard_release_resources(ec
);
587 static void pata_icside_shutdown(struct expansion_card
*ec
)
589 struct ata_host
*host
= ecard_get_drvdata(ec
);
593 * Disable interrupts from this card. We need to do
594 * this before disabling EASI since we may be accessing
595 * this register via that region.
597 local_irq_save(flags
);
598 ec
->ops
->irqdisable(ec
, ec
->irq
);
599 local_irq_restore(flags
);
602 * Reset the ROM pointer so that we can read the ROM
603 * after a soft reboot. This also disables access to
604 * the IDE taskfile via the EASI region.
607 struct pata_icside_state
*state
= host
->private_data
;
609 writeb(0, state
->ioc_base
);
613 static void __devexit
pata_icside_remove(struct expansion_card
*ec
)
615 struct ata_host
*host
= ecard_get_drvdata(ec
);
616 struct pata_icside_state
*state
= host
->private_data
;
618 ata_host_detach(host
);
620 pata_icside_shutdown(ec
);
623 * don't NULL out the drvdata - devres/libata wants it
624 * to free the ata_host structure.
626 if (state
->dma
!= NO_DMA
)
627 free_dma(state
->dma
);
630 ecard_release_resources(ec
);
633 static const struct ecard_id pata_icside_ids
[] = {
634 { MANU_ICS
, PROD_ICS_IDE
},
635 { MANU_ICS2
, PROD_ICS2_IDE
},
639 static struct ecard_driver pata_icside_driver
= {
640 .probe
= pata_icside_probe
,
641 .remove
= __devexit_p(pata_icside_remove
),
642 .shutdown
= pata_icside_shutdown
,
643 .id_table
= pata_icside_ids
,
649 static int __init
pata_icside_init(void)
651 return ecard_register_driver(&pata_icside_driver
);
654 static void __exit
pata_icside_exit(void)
656 ecard_remove_driver(&pata_icside_driver
);
659 MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
660 MODULE_LICENSE("GPL");
661 MODULE_DESCRIPTION("ICS PATA driver");
663 module_init(pata_icside_init
);
664 module_exit(pata_icside_exit
);