/*
 *  libata-sff.c - helper library for PCI IDE BMDMA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2006 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/libata.h>
#include <linux/highmem.h>

#include "libata.h"

static struct workqueue_struct *ata_sff_wq;

const struct ata_port_operations ata_sff_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_sff_qc_issue,
	.qc_fill_rtf		= ata_sff_qc_fill_rtf,

	.freeze			= ata_sff_freeze,
	.thaw			= ata_sff_thaw,
	.prereset		= ata_sff_prereset,
	.softreset		= ata_sff_softreset,
	.hardreset		= sata_sff_hardreset,
	.postreset		= ata_sff_postreset,
	.error_handler		= ata_sff_error_handler,

	.sff_dev_select		= ata_sff_dev_select,
	.sff_check_status	= ata_sff_check_status,
	.sff_tf_load		= ata_sff_tf_load,
	.sff_tf_read		= ata_sff_tf_read,
	.sff_exec_command	= ata_sff_exec_command,
	.sff_data_xfer		= ata_sff_data_xfer,
	.sff_drain_fifo		= ata_sff_drain_fifo,

	.lost_interrupt		= ata_sff_lost_interrupt,
};
EXPORT_SYMBOL_GPL(ata_sff_port_ops);
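
/*
 * Illustrative sketch (not part of the original file): a low-level driver
 * typically reuses these defaults by inheritance and overrides only what
 * its hardware needs.  The "my_" names below are hypothetical.
 *
 *	static struct ata_port_operations my_port_ops = {
 *		.inherits	= &ata_sff_port_ops,
 *		.sff_data_xfer	= ata_sff_data_xfer32,
 *		.set_piomode	= my_set_piomode,
 *	};
 */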

/**
 *	ata_sff_check_status - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile status register for currently-selected device
 *	and returns its value. This also clears pending interrupts
 *	from this device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_sff_check_status(struct ata_port *ap)
{
	return ioread8(ap->ioaddr.status_addr);
}
EXPORT_SYMBOL_GPL(ata_sff_check_status);

/**
 *	ata_sff_altstatus - Read device alternate status reg
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile alternate status register for
 *	currently-selected device and returns its value.
 *
 *	Note: may NOT be used as the check_altstatus() entry in
 *	ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 ata_sff_altstatus(struct ata_port *ap)
{
	if (ap->ops->sff_check_altstatus)
		return ap->ops->sff_check_altstatus(ap);

	return ioread8(ap->ioaddr.altstatus_addr);
}

/**
 *	ata_sff_irq_status - Check if the device is busy
 *	@ap: port where the device is
 *
 *	Determine if the port is currently busy. Uses altstatus
 *	if available in order to avoid clearing shared IRQ status
 *	when finding an IRQ source. Fortunately for us, devices
 *	without a ctl register don't share interrupt lines.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 ata_sff_irq_status(struct ata_port *ap)
{
	u8 status;

	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
		status = ata_sff_altstatus(ap);
		/* Not us: We are busy */
		if (status & ATA_BUSY)
			return status;
	}
	/* Clear INTRQ latch */
	status = ap->ops->sff_check_status(ap);
	return status;
}

/**
 *	ata_sff_sync - Flush writes
 *	@ap: Port to wait for.
 *
 *	CAUTION:
 *	If we have an mmio device with no ctl and no altstatus
 *	method, this will fail. No such devices are known to exist.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_sff_sync(struct ata_port *ap)
{
	if (ap->ops->sff_check_altstatus)
		ap->ops->sff_check_altstatus(ap);
	else if (ap->ioaddr.altstatus_addr)
		ioread8(ap->ioaddr.altstatus_addr);
}

/**
 *	ata_sff_pause - Flush writes and wait 400ns
 *	@ap: Port to pause for.
 *
 *	CAUTION:
 *	If we have an mmio device with no ctl and no altstatus
 *	method, this will fail. No such devices are known to exist.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_sff_pause(struct ata_port *ap)
{
	ata_sff_sync(ap);
	ndelay(400);
}
EXPORT_SYMBOL_GPL(ata_sff_pause);

/**
 *	ata_sff_dma_pause - Pause before commencing DMA
 *	@ap: Port to pause for.
 *
 *	Perform I/O fencing and ensure sufficient cycle delays occur
 *	for the HDMA1:0 transition.
 */

void ata_sff_dma_pause(struct ata_port *ap)
{
	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
		/* An altstatus read will cause the needed delay without
		   messing up the IRQ status */
		ata_sff_altstatus(ap);
		return;
	}
	/* There are no DMA controllers without ctl. BUG here to ensure
	   we never violate the HDMA1:0 transition timing and risk
	   corruption. */
	BUG();
}
EXPORT_SYMBOL_GPL(ata_sff_dma_pause);

/**
 *	ata_sff_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout in msecs
 *	@tmout: overall timeout in msecs
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_sff_busy_sleep(struct ata_port *ap,
		       unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_sff_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = ata_deadline(timer_start, tmout_pat);
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		ata_msleep(ap, 50);
		status = ata_sff_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	timeout = ata_deadline(timer_start, tmout);
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		ata_msleep(ap, 50);
		status = ap->ops->sff_check_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				DIV_ROUND_UP(tmout, 1000), status);
		return -EBUSY;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
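
/*
 * Usage sketch (illustrative, not from the original file): a driver's
 * reset path might give a drive 100ms before warning and 30s overall:
 *
 *	if (ata_sff_busy_sleep(ap, 100, 30000))
 *		return -EIO;	// still BSY, or no device present
 */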

static int ata_sff_check_ready(struct ata_link *link)
{
	u8 status = link->ap->ops->sff_check_status(link->ap);

	return ata_check_ready(status);
}

/**
 *	ata_sff_wait_ready - sleep until BSY clears, or timeout
 *	@link: SFF link to wait ready status for
 *	@deadline: deadline jiffies for the operation
 *
 *	Sleep until ATA Status register bit BSY clears, or timeout
 *	occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline)
{
	return ata_wait_ready(link, deadline, ata_sff_check_ready);
}
EXPORT_SYMBOL_GPL(ata_sff_wait_ready);

/**
 *	ata_sff_set_devctl - Write device control reg
 *	@ap: port where the device is
 *	@ctl: value to write
 *
 *	Writes ATA taskfile device control register.
 *
 *	Note: may NOT be used as the sff_set_devctl() entry in
 *	ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_sff_set_devctl(struct ata_port *ap, u8 ctl)
{
	if (ap->ops->sff_set_devctl)
		ap->ops->sff_set_devctl(ap, ctl);
	else
		iowrite8(ctl, ap->ioaddr.ctl_addr);
}

/**
 *	ata_sff_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel. Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_sff_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_sff_pause(ap);	/* needed; also flushes, for mmio */
}
EXPORT_SYMBOL_GPL(ata_sff_dev_select);

/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_sff_dev_select(), which
 *	additionally provides the services of inserting the proper
 *	pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */
static void ata_dev_select(struct ata_port *ap, unsigned int device,
			   unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->sff_dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
			ata_msleep(ap, 150);
		ata_wait_idle(ap);
	}
}

/**
 *	ata_sff_irq_on - Enable interrupts on a port.
 *	@ap: Port on which interrupts are enabled.
 *
 *	Enable interrupts on a legacy IDE device using MMIO or PIO,
 *	wait for idle, clear any pending interrupts.
 *
 *	Note: may NOT be used as the sff_irq_on() entry in
 *	ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_sff_irq_on(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	if (ap->ops->sff_irq_on) {
		ap->ops->sff_irq_on(ap);
		return;
	}

	ap->ctl &= ~ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (ap->ops->sff_set_devctl || ioaddr->ctl_addr)
		ata_sff_set_devctl(ap, ap->ctl);
	ata_wait_idle(ap);

	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_irq_on);

/**
 *	ata_sff_tf_load - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		if (ioaddr->ctl_addr)
			iowrite8(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		WARN_ON_ONCE(!ioaddr->ctl_addr);
		iowrite8(tf->hob_feature, ioaddr->feature_addr);
		iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
		iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
		iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
		iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		iowrite8(tf->feature, ioaddr->feature_addr);
		iowrite8(tf->nsect, ioaddr->nsect_addr);
		iowrite8(tf->lbal, ioaddr->lbal_addr);
		iowrite8(tf->lbam, ioaddr->lbam_addr);
		iowrite8(tf->lbah, ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		iowrite8(tf->device, ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_tf_load);
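
/*
 * Worked example (illustrative only, not part of the original file): a
 * 28-bit LBA READ SECTOR(S) of one sector at LBA 0x12345 would be built
 * roughly like this before being loaded and issued:
 *
 *	struct ata_taskfile tf;
 *
 *	ata_tf_init(dev, &tf);
 *	tf.flags   |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA;
 *	tf.command  = ATA_CMD_PIO_READ;
 *	tf.protocol = ATA_PROT_PIO;
 *	tf.nsect    = 1;
 *	tf.lbal     = 0x45;
 *	tf.lbam     = 0x23;
 *	tf.lbah     = 0x01;
 *	tf.device  |= ATA_LBA | ((0x12345 >> 24) & 0xf);  // LBA bits 27:24
 */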

/**
 *	ata_sff_tf_read - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf. Assumes the device has a fully SFF compliant task file
 *	layout and behaviour. If your device does not (e.g. has a different
 *	status method) then you will need to provide a replacement tf_read.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->command = ata_sff_check_status(ap);
	tf->feature = ioread8(ioaddr->error_addr);
	tf->nsect = ioread8(ioaddr->nsect_addr);
	tf->lbal = ioread8(ioaddr->lbal_addr);
	tf->lbam = ioread8(ioaddr->lbam_addr);
	tf->lbah = ioread8(ioaddr->lbah_addr);
	tf->device = ioread8(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		if (likely(ioaddr->ctl_addr)) {
			iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
			tf->hob_feature = ioread8(ioaddr->error_addr);
			tf->hob_nsect = ioread8(ioaddr->nsect_addr);
			tf->hob_lbal = ioread8(ioaddr->lbal_addr);
			tf->hob_lbam = ioread8(ioaddr->lbam_addr);
			tf->hob_lbah = ioread8(ioaddr->lbah_addr);
			iowrite8(tf->ctl, ioaddr->ctl_addr);
			ap->last_ctl = tf->ctl;
		} else
			WARN_ON_ONCE(1);
	}
}
EXPORT_SYMBOL_GPL(ata_sff_tf_read);

/**
 *	ata_sff_exec_command - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA command, with proper synchronization with interrupt
 *	handler / other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);

	iowrite8(tf->command, ap->ioaddr.command_addr);
	ata_sff_pause(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_exec_command);

/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->sff_tf_load(ap, tf);
	ap->ops->sff_exec_command(ap, tf);
}

/**
 *	ata_sff_data_xfer - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */
unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf,
			       unsigned int buflen, int rw)
{
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (rw == READ)
		ioread16_rep(data_addr, buf, words);
	else
		iowrite16_rep(data_addr, buf, words);

	/* Transfer trailing byte, if any. */
	if (unlikely(buflen & 0x01)) {
		unsigned char pad[2];

		/* Point buf to the tail of buffer */
		buf += buflen - 1;

		/*
		 * Use io*16_rep() accessors here as well to avoid pointlessly
		 * swapping bytes to and from on the big endian machines...
		 */
		if (rw == READ) {
			ioread16_rep(data_addr, pad, 1);
			*buf = pad[0];
		} else {
			pad[0] = *buf;
			iowrite16_rep(data_addr, pad, 1);
		}
		words++;
	}

	return words << 1;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer);

/**
 *	ata_sff_data_xfer32 - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO using 32bit
 *	I/O operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */

unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf,
				 unsigned int buflen, int rw)
{
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 2;
	int slop = buflen & 3;

	if (!(ap->pflags & ATA_PFLAG_PIO32))
		return ata_sff_data_xfer(dev, buf, buflen, rw);

	/* Transfer multiple of 4 bytes */
	if (rw == READ)
		ioread32_rep(data_addr, buf, words);
	else
		iowrite32_rep(data_addr, buf, words);

	/* Transfer trailing bytes, if any */
	if (unlikely(slop)) {
		unsigned char pad[4];

		/* Point buf to the tail of buffer */
		buf += buflen - slop;

		/*
		 * Use io*_rep() accessors here as well to avoid pointlessly
		 * swapping bytes to and from on the big endian machines...
		 */
		if (rw == READ) {
			if (slop < 3)
				ioread16_rep(data_addr, pad, 1);
			else
				ioread32_rep(data_addr, pad, 1);
			memcpy(buf, pad, slop);
		} else {
			memcpy(pad, buf, slop);
			if (slop < 3)
				iowrite16_rep(data_addr, pad, 1);
			else
				iowrite32_rep(data_addr, pad, 1);
		}
	}
	return (buflen + 1) & ~1;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
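
/*
 * Worked example of the tail handling above (added note): for buflen = 7,
 * words = 1 and slop = 3, so one 32-bit rep transfer moves bytes 0-3, a
 * second 32-bit transfer moves the 3-byte tail through the pad buffer,
 * and the function reports (7 + 1) & ~1 = 8 bytes consumed, i.e. the
 * byte count rounded up to the words actually clocked on the wire.
 */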

/**
 *	ata_sff_data_xfer_noirq - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */
unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
				     unsigned int buflen, int rw)
{
	unsigned long flags;
	unsigned int consumed;

	local_irq_save(flags);
	consumed = ata_sff_data_xfer(dev, buf, buflen, rw);
	local_irq_restore(flags);

	return consumed;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq);

/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg_page(qc->cursg);
	offset = qc->cursg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
				       do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
				       do_write);
	}

	if (!do_write && !PageSlab(page))
		flush_dcache_page(page);

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}
}

/**
 *	ata_pio_sectors - Transfer one or many sectors.
 *	@qc: Command on going
 *
 *	Transfer one or many sectors of data from/to the
 *	ATA device for the DRQ request.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE */
		unsigned int nsect;

		WARN_ON_ONCE(qc->dev->multi_count == 0);

		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);

	ata_sff_sync(qc->ap); /* flush */
}

/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called. Send the CDB.
 *
 *	LOCKING:
 *	caller.
 */
static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON_ONCE(qc->dev->cdb_len < 12);

	ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_sff_sync(ap);
	/* FIXME: If the CDB is for DMA do we need to do the transition delay
	   or is bmdma_start guaranteed to do it ? */
	switch (qc->tf.protocol) {
	case ATAPI_PROT_PIO:
		ap->hsm_task_state = HSM_ST;
		break;
	case ATAPI_PROT_NODATA:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
#ifdef CONFIG_ATA_BMDMA
	case ATAPI_PROT_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
#endif /* CONFIG_ATA_BMDMA */
	default:
		BUG();
	}
}

/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 */
static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	struct scatterlist *sg;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count, consumed;

next_sg:
	sg = qc->cursg;
	if (unlikely(!sg)) {
		ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
				  "buf=%u cur=%u bytes=%u",
				  qc->nbytes, qc->curbytes, bytes);
		return -1;
	}

	page = sg_page(sg);
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		consumed = ap->ops->sff_data_xfer(dev, buf + offset,
						  count, rw);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		consumed = ap->ops->sff_data_xfer(dev, buf + offset,
						  count, rw);
	}

	bytes -= min(bytes, consumed);
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}

	/*
	 * There used to be a  WARN_ON_ONCE(qc->cursg && count != consumed);
	 * Unfortunately __atapi_pio_bytes doesn't know enough to do the WARN
	 * check correctly as it doesn't know if it is the last request being
	 * made. Somebody should implement a proper sanity check.
	 */
	if (bytes)
		goto next_sg;
	return 0;
}

/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->sff_tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (unlikely(ireason & (1 << 0)))
		goto atapi_check;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (unlikely(do_write != i_write))
		goto atapi_check;

	if (unlikely(!bytes))
		goto atapi_check;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	if (unlikely(__atapi_pio_bytes(qc, bytes)))
		goto err_out;
	ata_sff_sync(ap); /* flush */

	return;

 atapi_check:
	ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
			  ireason, bytes);
 err_out:
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
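
/*
 * For reference (ATAPI spec values, added note): in the interrupt reason
 * register read above, bit 0 is CoD (1 = command packet, 0 = data) and
 * bit 1 is I/O (1 = transfer to host, 0 = to device); those are exactly
 * the two bits the unlikely() checks decode before trusting the byte
 * count in lbam/lbah.
 */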

/**
 *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *
 *	RETURNS:
 *	1 if ok in workqueue, 0 otherwise.
 */
static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	if (qc->tf.flags & ATA_TFLAG_POLLING)
		return 1;

	if (ap->hsm_task_state == HSM_ST_FIRST) {
		if (qc->tf.protocol == ATA_PROT_PIO &&
		    (qc->tf.flags & ATA_TFLAG_WRITE))
			return 1;

		if (ata_is_atapi(qc->tf.protocol) &&
		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return 1;
	}

	return 0;
}

/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					ata_sff_irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ata_sff_irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}

/**
 *	ata_sff_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		     u8 status, int in_wq)
{
	struct ata_link *link = qc->dev->link;
	struct ata_eh_info *ehi = &link->eh_info;
	unsigned long flags = 0;
	int poll_next;

	WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_sff_qc_issue() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else {
				/* HSM violation. Let EH handle this */
				ata_ehi_push_desc(ehi,
					"ST_FIRST: !(DRQ|ERR|DF)");
				qc->err_mask |= AC_ERR_HSM;
			}

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			/* Some ATAPI tape drives forget to clear the ERR bit
			 * when doing the next command (mostly request sense).
			 * We ignore ERR here to workaround and proceed sending
			 * the CDB.
			 */
			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
				ata_ehi_push_desc(ehi, "ST_FIRST: "
					"DRQ=1 with device error, "
					"dev_stat 0x%X", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. So, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_sff_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATAPI_PROT_PIO) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_ehi_push_desc(ehi, "ST-ATAPI: "
					"DRQ=1 with device error, "
					"dev_stat 0x%X", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF))) {
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;

					/* If diagnostic failed and this is
					 * IDENTIFY, it's likely a phantom
					 * device. Mark hint.
					 */
					if (qc->dev->horkage &
					    ATA_HORKAGE_DIAGNOSTIC)
						qc->err_mask |=
							AC_ERR_NODEV_HINT;
				} else {
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					ata_ehi_push_desc(ehi, "ST-ATA: "
						"DRQ=0 without device error, "
						"dev_stat 0x%X", status);
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;
				}

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) along with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ)) {
					ata_ehi_push_desc(ehi, "ST-ATA: "
						"BUSY|DRQ persists on ERR|DF, "
						"dev_stat 0x%X", status);
					qc->err_mask |= AC_ERR_HSM;
				}

				/* There are oddball controllers with
				 * status register stuck at 0x7f and
				 * lbal/m/h at zero which makes it
				 * pass all other presence detection
				 * mechanisms we have. Set NODEV_HINT
				 * for it. Kernel bz#7241.
				 */
				if (status == 0x7f)
					qc->err_mask |= AC_ERR_NODEV_HINT;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. So, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
EXPORT_SYMBOL_GPL(ata_sff_hsm_move);
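
/*
 * Rough summary of the state machine above (added for orientation; the
 * case labels are the authoritative transitions):
 *
 *	HSM_ST_FIRST -- first data block / CDB sent   --> HSM_ST
 *	HSM_ST       -- DRQ cycles until data done    --> HSM_ST_LAST
 *	HSM_ST_LAST  -- status checked, qc completed  --> HSM_ST_IDLE
 *	any state    -- error or HSM violation seen   --> HSM_ST_ERR
 *	HSM_ST_ERR   -- qc completed with err_mask    --> HSM_ST_IDLE
 */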

void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay)
{
	struct ata_port *ap = link->ap;

	WARN_ON((ap->sff_pio_task_link != NULL) &&
		(ap->sff_pio_task_link != link));
	ap->sff_pio_task_link = link;

	/* may fail if ata_sff_flush_pio_task() in progress */
	queue_delayed_work(ata_sff_wq, &ap->sff_pio_task,
			   msecs_to_jiffies(delay));
}
EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);

void ata_sff_flush_pio_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	cancel_rearming_delayed_work(&ap->sff_pio_task);
	ap->hsm_task_state = HSM_ST_IDLE;

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
}

static void ata_sff_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, sff_pio_task.work);
	struct ata_link *link = ap->sff_pio_task_link;
	struct ata_queued_cmd *qc;
	u8 status;
	int poll_next;

	BUG_ON(ap->sff_pio_task_link == NULL);
	/* qc can be NULL if timeout occurred */
	qc = ata_qc_from_tag(ap, link->active_tag);
	if (!qc) {
		ap->sff_pio_task_link = NULL;
		return;
	}

fsm_start:
	WARN_ON_ONCE(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic. This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two. If not, the drive is probably seeking
	 * or something. Snooze for a couple msecs, then
	 * chk-status again. If still busy, queue delayed work.
	 */
	status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		ata_msleep(ap, 2);
		status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
			return;
		}
	}

	/*
	 * hsm_move() may trigger another command to be processed.
	 * clean the link beforehand.
	 */
	ap->sff_pio_task_link = NULL;
	/* move the HSM */
	poll_next = ata_sff_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}

/**
 *	ata_sff_qc_issue - issue taskfile to a SFF controller
 *	@qc: command to issue to device
 *
 *	This function issues a PIO or NODATA command to a SFF
 *	controller.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING)
		qc->tf.flags |= ATA_TFLAG_POLLING;

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_sff_queue_pio_task(link, 0);

		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_sff_queue_pio_task(link, 0);

			/* always send first data block using the
			 * ata_sff_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_sff_queue_pio_task(link, 0);

			/* if polling, ata_sff_pio_task() handles the
			 * rest. otherwise, interrupt handler takes
			 * over from here.
			 */
		}

		break;

	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_sff_queue_pio_task(link, 0);
		break;

	default:
		WARN_ON_ONCE(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_qc_issue);
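
/*
 * For orientation (added note): the dispatch above means a NODATA command
 * waits for a single completion interrupt (HSM_ST_LAST), PIO writes and
 * ATAPI commands bounce through the pio task to push the first data block
 * or the CDB (HSM_ST_FIRST), and PIO reads go straight to HSM_ST where
 * the device's DRQ interrupts drive the transfer.
 */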

/**
 *	ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read
 *	@qc: qc to fill result TF for
 *
 *	@qc is finished and result TF needs to be filled. Fill it
 *	using ->sff_tf_read.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	true indicating that result TF is successfully filled.
 */
bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
	return true;
}
EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);

static unsigned int ata_sff_idle_irq(struct ata_port *ap)
{
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ap->ops->sff_check_status(ap);
		if (ap->ops->sff_irq_clear)
			ap->ops->sff_irq_clear(ap);
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}

static unsigned int __ata_sff_port_intr(struct ata_port *ap,
					struct ata_queued_cmd *qc,
					bool hsmv_on_idle)
{
	u8 status;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices. No
		 * need to check ata_is_atapi(qc->tf.protocol) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return ata_sff_idle_irq(ap);
		break;
	case HSM_ST:
	case HSM_ST_LAST:
		break;
	default:
		return ata_sff_idle_irq(ap);
	}

	/* check main status, clearing INTRQ if needed */
	status = ata_sff_irq_status(ap);
	if (status & ATA_BUSY) {
		if (hsmv_on_idle) {
			/* BMDMA engine is already stopped, we're screwed */
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
		} else
			return ata_sff_idle_irq(ap);
	}

	/* clear irq events */
	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);

	ata_sff_hsm_move(ap, qc, status, 0);

	return 1;	/* irq handled */
}

/**
 *	ata_sff_port_intr - Handle SFF port interrupt
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle port interrupt for given queued command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */
unsigned int ata_sff_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	return __ata_sff_port_intr(ap, qc, false);
}
EXPORT_SYMBOL_GPL(ata_sff_port_intr);

static inline irqreturn_t __ata_sff_interrupt(int irq, void *dev_instance,
	unsigned int (*port_intr)(struct ata_port *, struct ata_queued_cmd *))
{
	struct ata_host *host = dev_instance;
	bool retried = false;
	unsigned int i;
	unsigned int handled, idle, polling;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

retry:
	handled = idle = polling = 0;
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_queued_cmd *qc;

		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc) {
			if (!(qc->tf.flags & ATA_TFLAG_POLLING))
				handled |= port_intr(ap, qc);
			else
				polling |= 1 << i;
		} else
			idle |= 1 << i;
	}

	/*
	 * If no port was expecting IRQ but the controller is actually
	 * asserting IRQ line, a "nobody cared" IRQ failure will ensue.
	 * Check IRQ pending status if available and clear spurious IRQ.
	 */
	if (!handled && !retried) {
		bool retry = false;

		for (i = 0; i < host->n_ports; i++) {
			struct ata_port *ap = host->ports[i];

			if (polling & (1 << i))
				continue;

			if (!ap->ops->sff_irq_check ||
			    !ap->ops->sff_irq_check(ap))
				continue;

			if (idle & (1 << i)) {
				ap->ops->sff_check_status(ap);
				if (ap->ops->sff_irq_clear)
					ap->ops->sff_irq_clear(ap);
			} else {
				/* clear INTRQ and check if BUSY cleared */
				if (!(ap->ops->sff_check_status(ap) & ATA_BUSY))
					retry |= true;
				/*
				 * With command in flight, we can't do
				 * sff_irq_clear() w/o racing with completion.
				 */
			}
		}

		if (retry) {
			retried = true;
			goto retry;
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

/**
 *	ata_sff_interrupt - Default SFF ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host information structure
 *
 *	Default interrupt handler for PCI IDE devices. Calls
 *	ata_sff_port_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */
irqreturn_t ata_sff_interrupt(int irq, void *dev_instance)
{
	return __ata_sff_interrupt(irq, dev_instance, ata_sff_port_intr);
}
EXPORT_SYMBOL_GPL(ata_sff_interrupt);
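
/*
 * Registration sketch (illustrative, not from the original file): a PCI
 * SFF driver typically wires this handler up when activating the host,
 * e.g. from its ->probe() routine:
 *
 *	rc = ata_host_activate(host, pdev->irq, ata_sff_interrupt,
 *			       IRQF_SHARED, &my_sht);
 *
 * where "my_sht" stands in for the driver's scsi_host_template.
 */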

/**
 *	ata_sff_lost_interrupt - Check for an apparent lost interrupt
 *	@ap: port that appears to have timed out
 *
 *	Called from the libata error handlers when the core code suspects
 *	an interrupt has been lost. If it has, complete anything we can and
 *	then return. Interface must support altstatus for this faster
 *	recovery to occur.
 *
 *	Locking:
 *	Caller holds host lock
 */

void ata_sff_lost_interrupt(struct ata_port *ap)
{
	u8 status;
	struct ata_queued_cmd *qc;

	/* Only one outstanding command per SFF channel */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	/* We cannot lose an interrupt on a non-existent or polled command */
	if (!qc || qc->tf.flags & ATA_TFLAG_POLLING)
		return;
	/* See if the controller thinks it is still busy - if so the command
	   isn't a lost IRQ but is still in progress */
	status = ata_sff_altstatus(ap);
	if (status & ATA_BUSY)
		return;

	/* There was a command running, we are no longer busy and we have
	   no interrupt. */
	ata_port_printk(ap, KERN_WARNING, "lost interrupt (Status 0x%x)\n",
			status);
	/* Run the host interrupt logic as if the interrupt had not been
	   lost */
	ata_sff_port_intr(ap, qc);
}
EXPORT_SYMBOL_GPL(ata_sff_lost_interrupt);

/**
 *	ata_sff_freeze - Freeze SFF controller port
 *	@ap: port to freeze
 *
 *	Freeze SFF controller port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_sff_freeze(struct ata_port *ap)
{
	ap->ctl |= ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr)
		ata_sff_set_devctl(ap, ap->ctl);

	/* Under certain circumstances, some controllers raise IRQ on
	 * ATA_NIEN manipulation. Also, many controllers fail to mask
	 * previously pending IRQ on ATA_NIEN assertion. Clear it.
	 */
	ap->ops->sff_check_status(ap);

	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_freeze);

/**
 *	ata_sff_thaw - Thaw SFF controller port
 *	@ap: port to thaw
 *
 *	Thaw SFF controller port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_sff_thaw(struct ata_port *ap)
{
	/* clear & re-enable interrupts */
	ap->ops->sff_check_status(ap);
	if (ap->ops->sff_irq_clear)
		ap->ops->sff_irq_clear(ap);
	ata_sff_irq_on(ap);
}
EXPORT_SYMBOL_GPL(ata_sff_thaw);

/**
 *	ata_sff_prereset - prepare SFF link for reset
 *	@link: SFF link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	SFF link @link is about to be reset. Initialize it. It first
 *	calls ata_std_prereset() and waits for !BSY if the port is
 *	being softreset.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_sff_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_eh_context *ehc = &link->eh_context;
	int rc;

	rc = ata_std_prereset(link, deadline);
	if (rc)
		return rc;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* wait for !BSY if we don't know that no device is attached */
	if (!ata_link_offline(link)) {
		rc = ata_sff_wait_ready(link, deadline);
		if (rc && rc != -ENODEV) {
			ata_link_printk(link, KERN_WARNING, "device not ready "
					"(errno=%d), forcing hardreset\n", rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_prereset);

/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */
static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->sff_dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_sff_dev_classify - Parse returned ATA device signature
 *	@dev: ATA device to classify (starting at zero)
 *	@present: device seems present
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers. Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
				  u8 *r_err)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->sff_dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	ap->ops->sff_tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: continue and warn later */
	if (err == 0)
		/* diagnostic fail : do nothing _YET_ */
		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((dev->devno == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN) {
		/* If the device failed diagnostic, it's likely to
		 * have reported incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * device signature is invalid with diagnostic
		 * failure.
		 */
		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
			class = ATA_DEV_ATA;
		else
			class = ATA_DEV_NONE;
	} else if ((class == ATA_DEV_ATA) &&
		   (ap->ops->sff_check_status(ap) == 0))
		class = ATA_DEV_NONE;

	return class;
}
EXPORT_SYMBOL_GPL(ata_sff_dev_classify);
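
/*
 * For reference (spec values, added note): ata_dev_classify() keys off the
 * post-reset signature left in the shadow registers -- lbam/lbah of
 * 0x00/0x00 indicates an ATA device and 0x14/0xEB an ATAPI device;
 * anything else comes back as ATA_DEV_UNKNOWN and is resolved by the
 * heuristics above.
 */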

/**
 *	ata_sff_wait_after_reset - wait for devices to become ready after reset
 *	@link: SFF link which is just reset
 *	@devmask: mask of present devices
 *	@deadline: deadline jiffies for the operation
 *
 *	Wait for devices attached to SFF @link to become ready after
 *	reset. It contains preceding 150ms wait to avoid accessing TF
 *	status register too early.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -ENODEV if some or all of devices in @devmask
 *	don't seem to exist. -errno on other errors.
 */
int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	ata_msleep(ap, ATA_WAIT_AFTER_RESET);

	/* always check readiness of the master device */
	rc = ata_sff_wait_ready(link, deadline);
	/* -ENODEV means the odd clown forgot the D7 pulldown resistor
	 * and TF status is 0xff, bail out on it too.
	 */
	if (rc)
		return rc;

	/* if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->sff_dev_select(ap, 1);

		/* Wait for register access. Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it. We're gonna wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ioread8(ioaddr->nsect_addr);
			lbal = ioread8(ioaddr->lbal_addr);
			if ((nsect == 1) && (lbal == 1))
				break;
			ata_msleep(ap, 50);	/* give drive a breather */
		}

		rc = ata_sff_wait_ready(link, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* is all this really necessary? */
	ap->ops->sff_dev_select(ap, 0);
	if (dev1)
		ap->ops->sff_dev_select(ap, 1);
	if (dev0)
		ap->ops->sff_dev_select(ap, 0);

	return ret;
}
EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset);
1990
1991 static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
1992 unsigned long deadline)
1993 {
1994 struct ata_ioports *ioaddr = &ap->ioaddr;
1995
1996 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
1997
1998 /* software reset. causes dev0 to be selected */
1999 iowrite8(ap->ctl, ioaddr->ctl_addr);
2000 udelay(20); /* FIXME: flush */
2001 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2002 udelay(20); /* FIXME: flush */
2003 iowrite8(ap->ctl, ioaddr->ctl_addr);
2004 ap->last_ctl = ap->ctl;
2005
2006 /* wait for the port to become ready */
2007 return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
2008 }
2009
2010 /**
2011 * ata_sff_softreset - reset host port via ATA SRST
2012 * @link: ATA link to reset
2013 * @classes: resulting classes of attached devices
2014 * @deadline: deadline jiffies for the operation
2015 *
2016 * Reset host port using ATA SRST.
2017 *
2018 * LOCKING:
2019 * Kernel thread context (may sleep)
2020 *
2021 * RETURNS:
2022 * 0 on success, -errno otherwise.
2023 */
2024 int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
2025 unsigned long deadline)
2026 {
2027 struct ata_port *ap = link->ap;
2028 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2029 unsigned int devmask = 0;
2030 int rc;
2031 u8 err;
2032
2033 DPRINTK("ENTER\n");
2034
2035 /* determine if device 0/1 are present */
2036 if (ata_devchk(ap, 0))
2037 devmask |= (1 << 0);
2038 if (slave_possible && ata_devchk(ap, 1))
2039 devmask |= (1 << 1);
2040
2041 /* select device 0 again */
2042 ap->ops->sff_dev_select(ap, 0);
2043
2044 /* issue bus reset */
2045 DPRINTK("about to softreset, devmask=%x\n", devmask);
2046 rc = ata_bus_softreset(ap, devmask, deadline);
2047 /* if link is occupied, -ENODEV too is an error */
2048 if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
2049 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
2050 return rc;
2051 }
2052
2053 /* determine by signature whether we have ATA or ATAPI devices */
2054 classes[0] = ata_sff_dev_classify(&link->device[0],
2055 devmask & (1 << 0), &err);
2056 if (slave_possible && err != 0x81)
2057 classes[1] = ata_sff_dev_classify(&link->device[1],
2058 devmask & (1 << 1), &err);
2059
2060 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2061 return 0;
2062 }
2063 EXPORT_SYMBOL_GPL(ata_sff_softreset);
2064
2065 /**
2066 * sata_sff_hardreset - reset host port via SATA phy reset
2067 * @link: link to reset
2068 * @class: resulting class of attached device
2069 * @deadline: deadline jiffies for the operation
2070 *
2071 * SATA phy-reset host port using DET bits of SControl register,
2072 * wait for !BSY and classify the attached device.
2073 *
2074 * LOCKING:
2075 * Kernel thread context (may sleep)
2076 *
2077 * RETURNS:
2078 * 0 on success, -errno otherwise.
2079 */
2080 int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
2081 unsigned long deadline)
2082 {
2083 struct ata_eh_context *ehc = &link->eh_context;
2084 const unsigned long *timing = sata_ehc_deb_timing(ehc);
2085 bool online;
2086 int rc;
2087
2088 rc = sata_link_hardreset(link, timing, deadline, &online,
2089 ata_sff_check_ready);
2090 if (online)
2091 *class = ata_sff_dev_classify(link->device, 1, NULL);
2092
2093 DPRINTK("EXIT, class=%u\n", *class);
2094 return rc;
2095 }
2096 EXPORT_SYMBOL_GPL(sata_sff_hardreset);
2097
2098 /**
2099 * ata_sff_postreset - SFF postreset callback
2100 * @link: the target SFF ata_link
2101 * @classes: classes of attached devices
2102 *
2103 * This function is invoked after a successful reset. It first
2104 * calls ata_std_postreset() and then performs SFF-specific
2105 * postreset processing.
2106 *
2107 * LOCKING:
2108 * Kernel thread context (may sleep)
2109 */
2110 void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
2111 {
2112 struct ata_port *ap = link->ap;
2113
2114 ata_std_postreset(link, classes);
2115
2116 /* is double-select really necessary? */
2117 if (classes[0] != ATA_DEV_NONE)
2118 ap->ops->sff_dev_select(ap, 1);
2119 if (classes[1] != ATA_DEV_NONE)
2120 ap->ops->sff_dev_select(ap, 0);
2121
2122 /* bail out if no device is present */
2123 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2124 DPRINTK("EXIT, no device\n");
2125 return;
2126 }
2127
2128 /* set up device control */
2129 if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) {
2130 ata_sff_set_devctl(ap, ap->ctl);
2131 ap->last_ctl = ap->ctl;
2132 }
2133 }
2134 EXPORT_SYMBOL_GPL(ata_sff_postreset);
2135
2136 /**
2137 * ata_sff_drain_fifo - Stock FIFO drain logic for SFF controllers
2138 * @qc: command
2139 *
2140 * Drain the FIFO and device of any stuck data following a command
2141 * that failed to complete. In some cases this is necessary before a
2142 * reset will recover the device.
2143 *
2144 */
2145
2146 void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
2147 {
2148 int count;
2149 struct ata_port *ap;
2150
2151 /* We only need to flush incoming data when a command was running */
2152 if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
2153 return;
2154
2155 ap = qc->ap;
2156 /* Drain up to 64K of data before we give up this recovery method */
2157 for (count = 0; (ap->ops->sff_check_status(ap) & ATA_DRQ)
2158 && count < 65536; count += 2)
2159 ioread16(ap->ioaddr.data_addr);
2160
2161 /* Can become DEBUG later */
2162 if (count)
2163 ata_port_printk(ap, KERN_DEBUG,
2164 "drained %d bytes to clear DRQ.\n", count);
2165
2166 }
2167 EXPORT_SYMBOL_GPL(ata_sff_drain_fifo);
2168
2169 /**
2170 * ata_sff_error_handler - Stock error handler for SFF controller
2171 * @ap: port to handle error for
2172 *
2173 * Stock error handler for SFF controller. It can handle both
2174 * PATA and SATA controllers. Many controllers should be able to
2175 * use this EH as-is or with some added handling before and
2176 * after.
2177 *
2178 * LOCKING:
2179 * Kernel thread context (may sleep)
2180 */
2181 void ata_sff_error_handler(struct ata_port *ap)
2182 {
2183 ata_reset_fn_t softreset = ap->ops->softreset;
2184 ata_reset_fn_t hardreset = ap->ops->hardreset;
2185 struct ata_queued_cmd *qc;
2186 unsigned long flags;
2187
2188 qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2189 if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
2190 qc = NULL;
2191
2192 spin_lock_irqsave(ap->lock, flags);
2193
2194 /*
2195 * We *MUST* do FIFO draining before we issue a reset as
2196 * several devices helpfully clear their internal state and
2197 * will lock solid if we touch the data port post reset. Pass
2198 * qc in case anyone wants to do different PIO/DMA recovery or
2199 * has per-command fixups.
2200 */
2201 if (ap->ops->sff_drain_fifo)
2202 ap->ops->sff_drain_fifo(qc);
2203
2204 spin_unlock_irqrestore(ap->lock, flags);
2205
2206 /* ignore ata_sff_softreset if ctl isn't accessible */
2207 if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr)
2208 softreset = NULL;
2209
2210 /* ignore built-in hardresets if SCR access is not available */
2211 if ((hardreset == sata_std_hardreset ||
2212 hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link))
2213 hardreset = NULL;
2214
2215 ata_do_eh(ap, ap->ops->prereset, softreset, hardreset,
2216 ap->ops->postreset);
2217 }
2218 EXPORT_SYMBOL_GPL(ata_sff_error_handler);
2219
2220 /**
2221 * ata_sff_std_ports - initialize ioaddr with standard port offsets.
2222 * @ioaddr: IO address structure to be initialized
2223 *
2224 * Utility function which initializes data_addr, error_addr,
2225 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
2226 * device_addr, status_addr, and command_addr to standard offsets
2227 * relative to cmd_addr.
2228 *
2229 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
2230 */
2231 void ata_sff_std_ports(struct ata_ioports *ioaddr)
2232 {
2233 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
2234 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
2235 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
2236 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
2237 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
2238 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
2239 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
2240 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
2241 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
2242 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
2243 }
2244 EXPORT_SYMBOL_GPL(ata_sff_std_ports);
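
/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * that has mapped its own command/control blocks fills cmd_addr and
 * ctl_addr itself and lets ata_sff_std_ports() derive the rest. The
 * cmd_base/ctl_base cookies below are hypothetical driver mappings.
 *
 *	struct ata_ioports *ioaddr = &ap->ioaddr;
 *
 *	ioaddr->cmd_addr = cmd_base;
 *	ioaddr->altstatus_addr = ctl_base;
 *	ioaddr->ctl_addr = ctl_base;
 *	ata_sff_std_ports(ioaddr);
 */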
2245
2246 #ifdef CONFIG_PCI
2247
2248 static int ata_resources_present(struct pci_dev *pdev, int port)
2249 {
2250 int i;
2251
2252 /* Check the PCI resources for this channel are enabled */
2253 port = port * 2;
2254 for (i = 0; i < 2; i++) {
2255 if (pci_resource_start(pdev, port + i) == 0 ||
2256 pci_resource_len(pdev, port + i) == 0)
2257 return 0;
2258 }
2259 return 1;
2260 }
2261
2262 /**
2263 * ata_pci_sff_init_host - acquire native PCI ATA resources and init host
2264 * @host: target ATA host
2265 *
2266 * Acquire native PCI ATA resources for @host and initialize the
2267 * first two ports of @host accordingly. Ports marked dummy are
2268 * skipped and allocation failure makes the port dummy.
2269 *
2270 * Note that native PCI resources are valid even for legacy hosts
2271 * as we fix up the pdev resources array early in boot, so this
2272 * function can be used for both native and legacy SFF hosts.
2273 *
2274 * LOCKING:
2275 * Inherited from calling layer (may sleep).
2276 *
2277 * RETURNS:
2278 * 0 if at least one port is initialized, -ENODEV if no port is
2279 * available.
2280 */
2281 int ata_pci_sff_init_host(struct ata_host *host)
2282 {
2283 struct device *gdev = host->dev;
2284 struct pci_dev *pdev = to_pci_dev(gdev);
2285 unsigned int mask = 0;
2286 int i, rc;
2287
2288 /* request, iomap BARs and init port addresses accordingly */
2289 for (i = 0; i < 2; i++) {
2290 struct ata_port *ap = host->ports[i];
2291 int base = i * 2;
2292 void __iomem * const *iomap;
2293
2294 if (ata_port_is_dummy(ap))
2295 continue;
2296
2297 /* Discard disabled ports. Some controllers show
2298 * their unused channels this way. Disabled ports are
2299 * made dummy.
2300 */
2301 if (!ata_resources_present(pdev, i)) {
2302 ap->ops = &ata_dummy_port_ops;
2303 continue;
2304 }
2305
2306 rc = pcim_iomap_regions(pdev, 0x3 << base,
2307 dev_driver_string(gdev));
2308 if (rc) {
2309 dev_printk(KERN_WARNING, gdev,
2310 "failed to request/iomap BARs for port %d "
2311 "(errno=%d)\n", i, rc);
2312 if (rc == -EBUSY)
2313 pcim_pin_device(pdev);
2314 ap->ops = &ata_dummy_port_ops;
2315 continue;
2316 }
2317 host->iomap = iomap = pcim_iomap_table(pdev);
2318
2319 ap->ioaddr.cmd_addr = iomap[base];
2320 ap->ioaddr.altstatus_addr =
2321 ap->ioaddr.ctl_addr = (void __iomem *)
2322 ((unsigned long)iomap[base + 1] | ATA_PCI_CTL_OFS);
2323 ata_sff_std_ports(&ap->ioaddr);
2324
2325 ata_port_desc(ap, "cmd 0x%llx ctl 0x%llx",
2326 (unsigned long long)pci_resource_start(pdev, base),
2327 (unsigned long long)pci_resource_start(pdev, base + 1));
2328
2329 mask |= 1 << i;
2330 }
2331
2332 if (!mask) {
2333 dev_printk(KERN_ERR, gdev, "no available native port\n");
2334 return -ENODEV;
2335 }
2336
2337 return 0;
2338 }
2339 EXPORT_SYMBOL_GPL(ata_pci_sff_init_host);
2340
2341 /**
2342 * ata_pci_sff_prepare_host - helper to prepare PCI PIO-only SFF ATA host
2343 * @pdev: target PCI device
2344 * @ppi: array of port_info, must be enough for two ports
2345 * @r_host: out argument for the initialized ATA host
2346 *
2347 * Helper to allocate PIO-only SFF ATA host for @pdev, acquire
2348 * all PCI resources and initialize it accordingly in one go.
2349 *
2350 * LOCKING:
2351 * Inherited from calling layer (may sleep).
2352 *
2353 * RETURNS:
2354 * 0 on success, -errno otherwise.
2355 */
2356 int ata_pci_sff_prepare_host(struct pci_dev *pdev,
2357 const struct ata_port_info * const *ppi,
2358 struct ata_host **r_host)
2359 {
2360 struct ata_host *host;
2361 int rc;
2362
2363 if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
2364 return -ENOMEM;
2365
2366 host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
2367 if (!host) {
2368 dev_printk(KERN_ERR, &pdev->dev,
2369 "failed to allocate ATA host\n");
2370 rc = -ENOMEM;
2371 goto err_out;
2372 }
2373
2374 rc = ata_pci_sff_init_host(host);
2375 if (rc)
2376 goto err_out;
2377
2378 devres_remove_group(&pdev->dev, NULL);
2379 *r_host = host;
2380 return 0;
2381
2382 err_out:
2383 devres_release_group(&pdev->dev, NULL);
2384 return rc;
2385 }
2386 EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host);
2387
2388 /**
2389 * ata_pci_sff_activate_host - start SFF host, request IRQ and register it
2390 * @host: target SFF ATA host
2391 * @irq_handler: irq_handler used when requesting IRQ(s)
2392 * @sht: scsi_host_template to use when registering the host
2393 *
2394 * This is the counterpart of ata_host_activate() for SFF ATA
2395 * hosts. This separate helper is necessary because SFF hosts
2396 * use two separate interrupts in legacy mode.
2397 *
2398 * LOCKING:
2399 * Inherited from calling layer (may sleep).
2400 *
2401 * RETURNS:
2402 * 0 on success, -errno otherwise.
2403 */
2404 int ata_pci_sff_activate_host(struct ata_host *host,
2405 irq_handler_t irq_handler,
2406 struct scsi_host_template *sht)
2407 {
2408 struct device *dev = host->dev;
2409 struct pci_dev *pdev = to_pci_dev(dev);
2410 const char *drv_name = dev_driver_string(host->dev);
2411 int legacy_mode = 0, rc;
2412
2413 rc = ata_host_start(host);
2414 if (rc)
2415 return rc;
2416
2417 if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
2418 u8 tmp8, mask;
2419
2420 /* TODO: What if one channel is in native mode ... */
2421 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
2422 mask = (1 << 2) | (1 << 0);
2423 if ((tmp8 & mask) != mask)
2424 legacy_mode = 1;
2425 #if defined(CONFIG_NO_ATA_LEGACY)
2426 /* Some platforms with PCI limits cannot address compat
2427 port space. In that case we punt if their firmware has
2428 left a device in compatibility mode */
2429 if (legacy_mode) {
2430 printk(KERN_ERR "ata: Compatibility mode ATA is not supported on this platform, skipping.\n");
2431 return -EOPNOTSUPP;
2432 }
2433 #endif
2434 }
2435
2436 if (!devres_open_group(dev, NULL, GFP_KERNEL))
2437 return -ENOMEM;
2438
2439 if (!legacy_mode && pdev->irq) {
2440 rc = devm_request_irq(dev, pdev->irq, irq_handler,
2441 IRQF_SHARED, drv_name, host);
2442 if (rc)
2443 goto out;
2444
2445 ata_port_desc(host->ports[0], "irq %d", pdev->irq);
2446 ata_port_desc(host->ports[1], "irq %d", pdev->irq);
2447 } else if (legacy_mode) {
2448 if (!ata_port_is_dummy(host->ports[0])) {
2449 rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
2450 irq_handler, IRQF_SHARED,
2451 drv_name, host);
2452 if (rc)
2453 goto out;
2454
2455 ata_port_desc(host->ports[0], "irq %d",
2456 ATA_PRIMARY_IRQ(pdev));
2457 }
2458
2459 if (!ata_port_is_dummy(host->ports[1])) {
2460 rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
2461 irq_handler, IRQF_SHARED,
2462 drv_name, host);
2463 if (rc)
2464 goto out;
2465
2466 ata_port_desc(host->ports[1], "irq %d",
2467 ATA_SECONDARY_IRQ(pdev));
2468 }
2469 }
2470
2471 rc = ata_host_register(host, sht);
2472 out:
2473 if (rc == 0)
2474 devres_remove_group(dev, NULL);
2475 else
2476 devres_release_group(dev, NULL);
2477
2478 return rc;
2479 }
2480 EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host);
2481
2482 static const struct ata_port_info *ata_sff_find_valid_pi(
2483 const struct ata_port_info * const *ppi)
2484 {
2485 int i;
2486
2487 /* look up the first valid port_info */
2488 for (i = 0; i < 2 && ppi[i]; i++)
2489 if (ppi[i]->port_ops != &ata_dummy_port_ops)
2490 return ppi[i];
2491
2492 return NULL;
2493 }
2494
2495 /**
2496 * ata_pci_sff_init_one - Initialize/register PIO-only PCI IDE controller
2497 * @pdev: Controller to be initialized
2498 * @ppi: array of port_info, must be enough for two ports
2499 * @sht: scsi_host_template to use when registering the host
2500 * @host_priv: host private_data
2501 * @hflag: host flags
2502 *
2503 * This is a helper function which can be called from a driver's
2504 * xxx_init_one() probe function if the hardware uses traditional
2505 * IDE taskfile registers and is PIO only.
2506 *
2507 * ASSUMPTION:
2508 * Nobody makes a single channel controller that appears solely as
2509 * the secondary legacy port on PCI.
2510 *
2511 * LOCKING:
2512 * Inherited from PCI layer (may sleep).
2513 *
2514 * RETURNS:
2515 * Zero on success, negative errno-based value on error.
2516 */
2517 int ata_pci_sff_init_one(struct pci_dev *pdev,
2518 const struct ata_port_info * const *ppi,
2519 struct scsi_host_template *sht, void *host_priv, int hflag)
2520 {
2521 struct device *dev = &pdev->dev;
2522 const struct ata_port_info *pi;
2523 struct ata_host *host = NULL;
2524 int rc;
2525
2526 DPRINTK("ENTER\n");
2527
2528 pi = ata_sff_find_valid_pi(ppi);
2529 if (!pi) {
2530 dev_printk(KERN_ERR, &pdev->dev,
2531 "no valid port_info specified\n");
2532 return -EINVAL;
2533 }
2534
2535 if (!devres_open_group(dev, NULL, GFP_KERNEL))
2536 return -ENOMEM;
2537
2538 rc = pcim_enable_device(pdev);
2539 if (rc)
2540 goto out;
2541
2542 /* prepare and activate SFF host */
2543 rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
2544 if (rc)
2545 goto out;
2546 host->private_data = host_priv;
2547 host->flags |= hflag;
2548
2549 rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht);
2550 out:
2551 if (rc == 0)
2552 devres_remove_group(&pdev->dev, NULL);
2553 else
2554 devres_release_group(&pdev->dev, NULL);
2555
2556 return rc;
2557 }
2558 EXPORT_SYMBOL_GPL(ata_pci_sff_init_one);
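
/*
 * Usage sketch (illustrative only): a minimal PIO-only PCI probe built
 * on ata_pci_sff_init_one(). The "pata_foo" name, foo_sht and
 * foo_init_one() are hypothetical; port_ops is the stock
 * ata_sff_port_ops exported by this file.
 *
 *	static struct scsi_host_template foo_sht = {
 *		ATA_PIO_SHT("pata_foo"),
 *	};
 *
 *	static const struct ata_port_info foo_port_info = {
 *		.flags		= ATA_FLAG_SLAVE_POSS,
 *		.pio_mask	= ATA_PIO4,
 *		.port_ops	= &ata_sff_port_ops,
 *	};
 *
 *	static int foo_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *id)
 *	{
 *		const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *
 *		return ata_pci_sff_init_one(pdev, ppi, &foo_sht, NULL, 0);
 *	}
 */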
2559
2560 #endif /* CONFIG_PCI */
2561
2562 /*
2563 * BMDMA support
2564 */
2565
2566 #ifdef CONFIG_ATA_BMDMA
2567
2568 const struct ata_port_operations ata_bmdma_port_ops = {
2569 .inherits = &ata_sff_port_ops,
2570
2571 .error_handler = ata_bmdma_error_handler,
2572 .post_internal_cmd = ata_bmdma_post_internal_cmd,
2573
2574 .qc_prep = ata_bmdma_qc_prep,
2575 .qc_issue = ata_bmdma_qc_issue,
2576
2577 .sff_irq_clear = ata_bmdma_irq_clear,
2578 .bmdma_setup = ata_bmdma_setup,
2579 .bmdma_start = ata_bmdma_start,
2580 .bmdma_stop = ata_bmdma_stop,
2581 .bmdma_status = ata_bmdma_status,
2582
2583 .port_start = ata_bmdma_port_start,
2584 };
2585 EXPORT_SYMBOL_GPL(ata_bmdma_port_ops);
2586
2587 const struct ata_port_operations ata_bmdma32_port_ops = {
2588 .inherits = &ata_bmdma_port_ops,
2589
2590 .sff_data_xfer = ata_sff_data_xfer32,
2591 .port_start = ata_bmdma_port_start32,
2592 };
2593 EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops);
2594
2595 /**
2596 * ata_bmdma_fill_sg - Fill PCI IDE PRD table
2597 * @qc: Metadata associated with taskfile to be transferred
2598 *
2599 * Fill PCI IDE PRD (scatter-gather) table with segments
2600 * associated with the current disk command.
2601 *
2602 * LOCKING:
2603 * spin_lock_irqsave(host lock)
2604 *
2605 */
2606 static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
2607 {
2608 struct ata_port *ap = qc->ap;
2609 struct ata_bmdma_prd *prd = ap->bmdma_prd;
2610 struct scatterlist *sg;
2611 unsigned int si, pi;
2612
2613 pi = 0;
2614 for_each_sg(qc->sg, sg, qc->n_elem, si) {
2615 u32 addr, offset;
2616 u32 sg_len, len;
2617
2618 /* determine if physical DMA addr spans 64K boundary.
2619 * Note h/w doesn't support 64-bit, so we unconditionally
2620 * truncate dma_addr_t to u32.
2621 */
2622 addr = (u32) sg_dma_address(sg);
2623 sg_len = sg_dma_len(sg);
2624
2625 while (sg_len) {
2626 offset = addr & 0xffff;
2627 len = sg_len;
2628 if ((offset + sg_len) > 0x10000)
2629 len = 0x10000 - offset;
2630
2631 prd[pi].addr = cpu_to_le32(addr);
2632 prd[pi].flags_len = cpu_to_le32(len & 0xffff);
2633 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
2634
2635 pi++;
2636 sg_len -= len;
2637 addr += len;
2638 }
2639 }
2640
2641 prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2642 }
2643
2644 /**
2645 * ata_bmdma_fill_sg_dumb - Fill PCI IDE PRD table
2646 * @qc: Metadata associated with taskfile to be transferred
2647 *
2648 * Fill PCI IDE PRD (scatter-gather) table with segments
2649 * associated with the current disk command. Perform the fill
2650 * so that we avoid writing any 64K-length (0x0000) records for
2651 * controllers that don't follow the spec.
2652 *
2653 * LOCKING:
2654 * spin_lock_irqsave(host lock)
2655 *
2656 */
2657 static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
2658 {
2659 struct ata_port *ap = qc->ap;
2660 struct ata_bmdma_prd *prd = ap->bmdma_prd;
2661 struct scatterlist *sg;
2662 unsigned int si, pi;
2663
2664 pi = 0;
2665 for_each_sg(qc->sg, sg, qc->n_elem, si) {
2666 u32 addr, offset;
2667 u32 sg_len, len, blen;
2668
2669 /* determine if physical DMA addr spans 64K boundary.
2670 * Note h/w doesn't support 64-bit, so we unconditionally
2671 * truncate dma_addr_t to u32.
2672 */
2673 addr = (u32) sg_dma_address(sg);
2674 sg_len = sg_dma_len(sg);
2675
2676 while (sg_len) {
2677 offset = addr & 0xffff;
2678 len = sg_len;
2679 if ((offset + sg_len) > 0x10000)
2680 len = 0x10000 - offset;
2681
2682 blen = len & 0xffff;
2683 prd[pi].addr = cpu_to_le32(addr);
2684 if (blen == 0) {
2685 /* Some PATA chipsets like the CS5530 can't
2686 cope with 0x0000 meaning 64K as the spec
2687 says */
2688 prd[pi].flags_len = cpu_to_le32(0x8000);
2689 blen = 0x8000;
2690 prd[++pi].addr = cpu_to_le32(addr + 0x8000);
2691 }
2692 prd[pi].flags_len = cpu_to_le32(blen);
2693 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
2694
2695 pi++;
2696 sg_len -= len;
2697 addr += len;
2698 }
2699 }
2700
2701 prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2702 }
2703
2704 /**
2705 * ata_bmdma_qc_prep - Prepare taskfile for submission
2706 * @qc: Metadata associated with taskfile to be prepared
2707 *
2708 * Prepare ATA taskfile for submission.
2709 *
2710 * LOCKING:
2711 * spin_lock_irqsave(host lock)
2712 */
2713 void ata_bmdma_qc_prep(struct ata_queued_cmd *qc)
2714 {
2715 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2716 return;
2717
2718 ata_bmdma_fill_sg(qc);
2719 }
2720 EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep);
2721
2722 /**
2723 * ata_bmdma_dumb_qc_prep - Prepare taskfile for submission
2724 * @qc: Metadata associated with taskfile to be prepared
2725 *
2726 * Prepare ATA taskfile for submission.
2727 *
2728 * LOCKING:
2729 * spin_lock_irqsave(host lock)
2730 */
2731 void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc)
2732 {
2733 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2734 return;
2735
2736 ata_bmdma_fill_sg_dumb(qc);
2737 }
2738 EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep);
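
/*
 * A driver for a controller that mishandles full 64K PRD entries would
 * pick the dumb variant in its port_ops; hypothetical sketch:
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.qc_prep	= ata_bmdma_dumb_qc_prep,
 *	};
 */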
2739
2740 /**
2741 * ata_bmdma_qc_issue - issue taskfile to a BMDMA controller
2742 * @qc: command to issue to device
2743 *
2744 * This function issues a PIO, NODATA or DMA command to an
2745 * SFF/BMDMA controller. PIO and NODATA are handled by
2746 * ata_sff_qc_issue().
2747 *
2748 * LOCKING:
2749 * spin_lock_irqsave(host lock)
2750 *
2751 * RETURNS:
2752 * Zero on success, AC_ERR_* mask on failure
2753 */
2754 unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
2755 {
2756 struct ata_port *ap = qc->ap;
2757 struct ata_link *link = qc->dev->link;
2758
2759 /* defer PIO handling to sff_qc_issue */
2760 if (!ata_is_dma(qc->tf.protocol))
2761 return ata_sff_qc_issue(qc);
2762
2763 /* select the device */
2764 ata_dev_select(ap, qc->dev->devno, 1, 0);
2765
2766 /* start the command */
2767 switch (qc->tf.protocol) {
2768 case ATA_PROT_DMA:
2769 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
2770
2771 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
2772 ap->ops->bmdma_setup(qc); /* set up bmdma */
2773 ap->ops->bmdma_start(qc); /* initiate bmdma */
2774 ap->hsm_task_state = HSM_ST_LAST;
2775 break;
2776
2777 case ATAPI_PROT_DMA:
2778 WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
2779
2780 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
2781 ap->ops->bmdma_setup(qc); /* set up bmdma */
2782 ap->hsm_task_state = HSM_ST_FIRST;
2783
2784 /* send cdb by polling if no cdb interrupt */
2785 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
2786 ata_sff_queue_pio_task(link, 0);
2787 break;
2788
2789 default:
2790 WARN_ON(1);
2791 return AC_ERR_SYSTEM;
2792 }
2793
2794 return 0;
2795 }
2796 EXPORT_SYMBOL_GPL(ata_bmdma_qc_issue);
2797
2798 /**
2799 * ata_bmdma_port_intr - Handle BMDMA port interrupt
2800 * @ap: Port on which interrupt arrived (possibly...)
2801 * @qc: Taskfile currently active in engine
2802 *
2803 * Handle port interrupt for given queued command.
2804 *
2805 * LOCKING:
2806 * spin_lock_irqsave(host lock)
2807 *
2808 * RETURNS:
2809 * One if interrupt was handled, zero if not (shared irq).
2810 */
2811 unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
2812 {
2813 struct ata_eh_info *ehi = &ap->link.eh_info;
2814 u8 host_stat = 0;
2815 bool bmdma_stopped = false;
2816 unsigned int handled;
2817
2818 if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
2819 /* check status of DMA engine */
2820 host_stat = ap->ops->bmdma_status(ap);
2821 VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat);
2822
2823 /* if it's not our irq... */
2824 if (!(host_stat & ATA_DMA_INTR))
2825 return ata_sff_idle_irq(ap);
2826
2827 /* before we do anything else, clear DMA-Start bit */
2828 ap->ops->bmdma_stop(qc);
2829 bmdma_stopped = true;
2830
2831 if (unlikely(host_stat & ATA_DMA_ERR)) {
2832 /* error when transferring data to/from memory */
2833 qc->err_mask |= AC_ERR_HOST_BUS;
2834 ap->hsm_task_state = HSM_ST_ERR;
2835 }
2836 }
2837
2838 handled = __ata_sff_port_intr(ap, qc, bmdma_stopped);
2839
2840 if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
2841 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2842
2843 return handled;
2844 }
2845 EXPORT_SYMBOL_GPL(ata_bmdma_port_intr);
2846
2847 /**
2848 * ata_bmdma_interrupt - Default BMDMA ATA host interrupt handler
2849 * @irq: irq line (unused)
2850 * @dev_instance: pointer to our ata_host information structure
2851 *
2852 * Default interrupt handler for PCI IDE devices. Calls
2853 * ata_bmdma_port_intr() for each port that is not disabled.
2854 *
2855 * LOCKING:
2856 * Obtains host lock during operation.
2857 *
2858 * RETURNS:
2859 * IRQ_NONE or IRQ_HANDLED.
2860 */
2861 irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance)
2862 {
2863 return __ata_sff_interrupt(irq, dev_instance, ata_bmdma_port_intr);
2864 }
2865 EXPORT_SYMBOL_GPL(ata_bmdma_interrupt);
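
/*
 * PCI drivers usually get this handler installed for them by
 * ata_pci_bmdma_init_one() below. A driver activating its host by
 * hand would pass it to ata_host_activate(); sketch with a
 * hypothetical foo_sht:
 *
 *	rc = ata_host_activate(host, irq, ata_bmdma_interrupt,
 *			       IRQF_SHARED, &foo_sht);
 */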
2866
2867 /**
2868 * ata_bmdma_error_handler - Stock error handler for BMDMA controller
2869 * @ap: port to handle error for
2870 *
2871 * Stock error handler for BMDMA controller. It can handle both
2872 * PATA and SATA controllers. Most BMDMA controllers should be
2873 * able to use this EH as-is or with some added handling before
2874 * and after.
2875 *
2876 * LOCKING:
2877 * Kernel thread context (may sleep)
2878 */
2879 void ata_bmdma_error_handler(struct ata_port *ap)
2880 {
2881 struct ata_queued_cmd *qc;
2882 unsigned long flags;
2883 bool thaw = false;
2884
2885 qc = __ata_qc_from_tag(ap, ap->link.active_tag);
2886 if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
2887 qc = NULL;
2888
2889 /* reset PIO HSM and stop DMA engine */
2890 spin_lock_irqsave(ap->lock, flags);
2891
2892 if (qc && ata_is_dma(qc->tf.protocol)) {
2893 u8 host_stat;
2894
2895 host_stat = ap->ops->bmdma_status(ap);
2896
2897 /* BMDMA controllers indicate host bus error by
2898 * setting DMA_ERR bit and timing out. As it wasn't
2899 * really a timeout event, adjust error mask and
2900 * cancel frozen state.
2901 */
2902 if (qc->err_mask == AC_ERR_TIMEOUT && (host_stat & ATA_DMA_ERR)) {
2903 qc->err_mask = AC_ERR_HOST_BUS;
2904 thaw = true;
2905 }
2906
2907 ap->ops->bmdma_stop(qc);
2908
2909 /* if we're gonna thaw, make sure IRQ is clear */
2910 if (thaw) {
2911 ap->ops->sff_check_status(ap);
2912 if (ap->ops->sff_irq_clear)
2913 ap->ops->sff_irq_clear(ap);
2914 }
2915 }
2916
2917 spin_unlock_irqrestore(ap->lock, flags);
2918
2919 if (thaw)
2920 ata_eh_thaw_port(ap);
2921
2922 ata_sff_error_handler(ap);
2923 }
2924 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
2925
2926 /**
2927 * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for BMDMA
2928 * @qc: internal command to clean up
2929 *
2930 * LOCKING:
2931 * Kernel thread context (may sleep)
2932 */
2933 void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
2934 {
2935 struct ata_port *ap = qc->ap;
2936 unsigned long flags;
2937
2938 if (ata_is_dma(qc->tf.protocol)) {
2939 spin_lock_irqsave(ap->lock, flags);
2940 ap->ops->bmdma_stop(qc);
2941 spin_unlock_irqrestore(ap->lock, flags);
2942 }
2943 }
2944 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
2945
2946 /**
2947 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
2948 * @ap: Port associated with this ATA transaction.
2949 *
2950 * Clear interrupt and error flags in DMA status register.
2951 *
2952 * May be used as the irq_clear() entry in ata_port_operations.
2953 *
2954 * LOCKING:
2955 * spin_lock_irqsave(host lock)
2956 */
2957 void ata_bmdma_irq_clear(struct ata_port *ap)
2958 {
2959 void __iomem *mmio = ap->ioaddr.bmdma_addr;
2960
2961 if (!mmio)
2962 return;
2963
2964 iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
2965 }
2966 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
2967
2968 /**
2969 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
2970 * @qc: Info associated with this ATA transaction.
2971 *
2972 * LOCKING:
2973 * spin_lock_irqsave(host lock)
2974 */
2975 void ata_bmdma_setup(struct ata_queued_cmd *qc)
2976 {
2977 struct ata_port *ap = qc->ap;
2978 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
2979 u8 dmactl;
2980
2981 /* load PRD table addr. */
2982 mb(); /* make sure PRD table writes are visible to controller */
2983 iowrite32(ap->bmdma_prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2984
2985 /* specify data direction, triple-check start bit is clear */
2986 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2987 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
2988 if (!rw)
2989 dmactl |= ATA_DMA_WR;
2990 iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2991
2992 /* issue r/w command */
2993 ap->ops->sff_exec_command(ap, &qc->tf);
2994 }
2995 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
2996
2997 /**
2998 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
2999 * @qc: Info associated with this ATA transaction.
3000 *
3001 * LOCKING:
3002 * spin_lock_irqsave(host lock)
3003 */
3004 void ata_bmdma_start(struct ata_queued_cmd *qc)
3005 {
3006 struct ata_port *ap = qc->ap;
3007 u8 dmactl;
3008
3009 /* start host DMA transaction */
3010 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3011 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3012
3013 /* Strictly, one may wish to issue an ioread8() here, to
3014 * flush the mmio write. However, control also passes
3015 * to the hardware at this point, and it will interrupt
3016 * us when we are to resume control. So, in effect,
3017 * we don't care when the mmio write flushes.
3018 * Further, a read of the DMA status register _immediately_
3019 * following the write may not be what certain flaky hardware
3020 * is expecting, so I think it is best not to add a readb()
3021 * without first auditing all the MMIO ATA cards/mobos.
3022 * Or maybe I'm just being paranoid.
3023 *
3024 * FIXME: The posting of this write means I/O starts are
3025 * unnecessarily delayed for MMIO
3026 */
3027 }
3028 EXPORT_SYMBOL_GPL(ata_bmdma_start);
3029
3030 /**
3031 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
3032 * @qc: Command we are ending DMA for
3033 *
3034 * Clears the ATA_DMA_START flag in the dma control register
3035 *
3036 * May be used as the bmdma_stop() entry in ata_port_operations.
3037 *
3038 * LOCKING:
3039 * spin_lock_irqsave(host lock)
3040 */
3041 void ata_bmdma_stop(struct ata_queued_cmd *qc)
3042 {
3043 struct ata_port *ap = qc->ap;
3044 void __iomem *mmio = ap->ioaddr.bmdma_addr;
3045
3046 /* clear start/stop bit */
3047 iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
3048 mmio + ATA_DMA_CMD);
3049
3050 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
3051 ata_sff_dma_pause(ap);
3052 }
3053 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
3054
3055 /**
3056 * ata_bmdma_status - Read PCI IDE BMDMA status
3057 * @ap: Port associated with this ATA transaction.
3058 *
3059 * Read and return BMDMA status register.
3060 *
3061 * May be used as the bmdma_status() entry in ata_port_operations.
3062 *
3063 * LOCKING:
3064 * spin_lock_irqsave(host lock)
3065 */
3066 u8 ata_bmdma_status(struct ata_port *ap)
3067 {
3068 return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
3069 }
3070 EXPORT_SYMBOL_GPL(ata_bmdma_status);
3071
3072
3073 /**
3074 * ata_bmdma_port_start - Set port up for bmdma.
3075 * @ap: Port to initialize
3076 *
3077 * Called just after data structures for each port are
3078 * initialized. Allocates space for PRD table.
3079 *
3080 * May be used as the port_start() entry in ata_port_operations.
3081 *
3082 * LOCKING:
3083 * Inherited from caller.
3084 */
3085 int ata_bmdma_port_start(struct ata_port *ap)
3086 {
3087 if (ap->mwdma_mask || ap->udma_mask) {
3088 ap->bmdma_prd =
3089 dmam_alloc_coherent(ap->host->dev, ATA_PRD_TBL_SZ,
3090 &ap->bmdma_prd_dma, GFP_KERNEL);
3091 if (!ap->bmdma_prd)
3092 return -ENOMEM;
3093 }
3094
3095 return 0;
3096 }
3097 EXPORT_SYMBOL_GPL(ata_bmdma_port_start);
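
/*
 * Drivers needing extra per-port setup typically wrap this helper
 * rather than replace it; hypothetical sketch:
 *
 *	static int foo_port_start(struct ata_port *ap)
 *	{
 *		int rc = ata_bmdma_port_start(ap);
 *
 *		if (rc)
 *			return rc;
 *		... controller-specific per-port setup here ...
 *		return 0;
 *	}
 */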
3098
3099 /**
3100 * ata_bmdma_port_start32 - Set port up for dma.
3101 * @ap: Port to initialize
3102 *
3103 * Called just after data structures for each port are
3104 * initialized. Enables 32bit PIO and allocates space for PRD
3105 * table.
3106 *
3107 * May be used as the port_start() entry in ata_port_operations for
3108 * devices that are capable of 32bit PIO.
3109 *
3110 * LOCKING:
3111 * Inherited from caller.
3112 */
3113 int ata_bmdma_port_start32(struct ata_port *ap)
3114 {
3115 ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE;
3116 return ata_bmdma_port_start(ap);
3117 }
3118 EXPORT_SYMBOL_GPL(ata_bmdma_port_start32);
3119
3120 #ifdef CONFIG_PCI
3121
3122 /**
3123 * ata_pci_bmdma_clear_simplex - attempt to kick device out of simplex
3124 * @pdev: PCI device
3125 *
3126 * Some PCI ATA devices report simplex mode but in fact can be told to
3127 * enter non-simplex mode. This implements the necessary logic to
3128 * perform the task on such devices. Calling it on other devices will
3129 * have -undefined- behaviour.
3130 */
3131 int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev)
3132 {
3133 unsigned long bmdma = pci_resource_start(pdev, 4);
3134 u8 simplex;
3135
3136 if (bmdma == 0)
3137 return -ENOENT;
3138
3139 simplex = inb(bmdma + 0x02);
3140 outb(simplex & 0x60, bmdma + 0x02);
3141 simplex = inb(bmdma + 0x02);
3142 if (simplex & 0x80)
3143 return -EOPNOTSUPP;
3144 return 0;
3145 }
3146 EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex);
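
/*
 * A hypothetical driver whose hardware falsely claims simplex would
 * call this from its init path before preparing the host, e.g.:
 *
 *	if (ata_pci_bmdma_clear_simplex(pdev))
 *		dev_printk(KERN_WARNING, &pdev->dev,
 *			   "simplex flag is stuck, DMA will be serialized\n");
 */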
3147
3148 static void ata_bmdma_nodma(struct ata_host *host, const char *reason)
3149 {
3150 int i;
3151
3152 dev_printk(KERN_ERR, host->dev, "BMDMA: %s, falling back to PIO\n",
3153 reason);
3154
3155 for (i = 0; i < 2; i++) {
3156 host->ports[i]->mwdma_mask = 0;
3157 host->ports[i]->udma_mask = 0;
3158 }
3159 }
3160
3161 /**
3162 * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host
3163 * @host: target ATA host
3164 *
3165 * Acquire PCI BMDMA resources and initialize @host accordingly.
3166 *
3167 * LOCKING:
3168 * Inherited from calling layer (may sleep).
3169 */
3170 void ata_pci_bmdma_init(struct ata_host *host)
3171 {
3172 struct device *gdev = host->dev;
3173 struct pci_dev *pdev = to_pci_dev(gdev);
3174 int i, rc;
3175
3176 /* No BAR4 allocation: No DMA */
3177 if (pci_resource_start(pdev, 4) == 0) {
3178 ata_bmdma_nodma(host, "BAR4 is zero");
3179 return;
3180 }
3181
3182 /*
3183 * Some controllers require BMDMA region to be initialized
3184 * even if DMA is not in use to clear IRQ status via
3185 * ->sff_irq_clear method. Try to initialize bmdma_addr
3186 * regardless of dma masks.
3187 */
3188 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
3189 if (rc)
3190 ata_bmdma_nodma(host, "failed to set dma mask");
3191 if (!rc) {
3192 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
3193 if (rc)
3194 ata_bmdma_nodma(host,
3195 "failed to set consistent dma mask");
3196 }
3197
3198 /* request and iomap DMA region */
3199 rc = pcim_iomap_regions(pdev, 1 << 4, dev_driver_string(gdev));
3200 if (rc) {
3201 ata_bmdma_nodma(host, "failed to request/iomap BAR4");
3202 return;
3203 }
3204 host->iomap = pcim_iomap_table(pdev);
3205
3206 for (i = 0; i < 2; i++) {
3207 struct ata_port *ap = host->ports[i];
3208 void __iomem *bmdma = host->iomap[4] + 8 * i;
3209
3210 if (ata_port_is_dummy(ap))
3211 continue;
3212
3213 ap->ioaddr.bmdma_addr = bmdma;
3214 if ((!(ap->flags & ATA_FLAG_IGN_SIMPLEX)) &&
3215 (ioread8(bmdma + 2) & 0x80))
3216 host->flags |= ATA_HOST_SIMPLEX;
3217
3218 ata_port_desc(ap, "bmdma 0x%llx",
3219 (unsigned long long)pci_resource_start(pdev, 4) + 8 * i);
3220 }
3221 }
3222 EXPORT_SYMBOL_GPL(ata_pci_bmdma_init);
3223
3224 /**
3225 * ata_pci_bmdma_prepare_host - helper to prepare PCI BMDMA ATA host
3226 * @pdev: target PCI device
3227 * @ppi: array of port_info, must be enough for two ports
3228 * @r_host: out argument for the initialized ATA host
3229 *
3230 * Helper to allocate BMDMA ATA host for @pdev, acquire all PCI
3231 * resources and initialize it accordingly in one go.
3232 *
3233 * LOCKING:
3234 * Inherited from calling layer (may sleep).
3235 *
3236 * RETURNS:
3237 * 0 on success, -errno otherwise.
3238 */
3239 int ata_pci_bmdma_prepare_host(struct pci_dev *pdev,
3240 const struct ata_port_info * const * ppi,
3241 struct ata_host **r_host)
3242 {
3243 int rc;
3244
3245 rc = ata_pci_sff_prepare_host(pdev, ppi, r_host);
3246 if (rc)
3247 return rc;
3248
3249 ata_pci_bmdma_init(*r_host);
3250 return 0;
3251 }
3252 EXPORT_SYMBOL_GPL(ata_pci_bmdma_prepare_host);
3253
3254 /**
3255 * ata_pci_bmdma_init_one - Initialize/register BMDMA PCI IDE controller
3256 * @pdev: Controller to be initialized
3257 * @ppi: array of port_info, must be enough for two ports
3258 * @sht: scsi_host_template to use when registering the host
3259 * @host_priv: host private_data
3260 * @hflags: host flags
3261 *
3262 * This function is similar to ata_pci_sff_init_one() but also
3263 * takes care of BMDMA initialization.
3264 *
3265 * LOCKING:
3266 * Inherited from PCI layer (may sleep).
3267 *
3268 * RETURNS:
3269 * Zero on success, negative errno-based value on error.
3270 */
3271 int ata_pci_bmdma_init_one(struct pci_dev *pdev,
3272 const struct ata_port_info * const * ppi,
3273 struct scsi_host_template *sht, void *host_priv,
3274 int hflags)
3275 {
3276 struct device *dev = &pdev->dev;
3277 const struct ata_port_info *pi;
3278 struct ata_host *host = NULL;
3279 int rc;
3280
3281 DPRINTK("ENTER\n");
3282
3283 pi = ata_sff_find_valid_pi(ppi);
3284 if (!pi) {
3285 dev_printk(KERN_ERR, &pdev->dev,
3286 "no valid port_info specified\n");
3287 return -EINVAL;
3288 }
3289
3290 if (!devres_open_group(dev, NULL, GFP_KERNEL))
3291 return -ENOMEM;
3292
3293 rc = pcim_enable_device(pdev);
3294 if (rc)
3295 goto out;
3296
3297 /* prepare and activate BMDMA host */
3298 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
3299 if (rc)
3300 goto out;
3301 host->private_data = host_priv;
3302 host->flags |= hflags;
3303
3304 pci_set_master(pdev);
3305 rc = ata_pci_sff_activate_host(host, ata_bmdma_interrupt, sht);
3306 out:
3307 if (rc == 0)
3308 devres_remove_group(&pdev->dev, NULL);
3309 else
3310 devres_release_group(&pdev->dev, NULL);
3311
3312 return rc;
3313 }
3314 EXPORT_SYMBOL_GPL(ata_pci_bmdma_init_one);
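
/*
 * Usage sketch (illustrative only), mirroring the PIO-only example
 * after ata_pci_sff_init_one(): a DMA-capable probe. Names are
 * hypothetical; the masks use the standard ATA_* mode macros.
 *
 *	static struct scsi_host_template foo_sht = {
 *		ATA_BMDMA_SHT("pata_foo"),
 *	};
 *
 *	static const struct ata_port_info foo_port_info = {
 *		.flags		= ATA_FLAG_SLAVE_POSS,
 *		.pio_mask	= ATA_PIO4,
 *		.mwdma_mask	= ATA_MWDMA2,
 *		.udma_mask	= ATA_UDMA5,
 *		.port_ops	= &ata_bmdma_port_ops,
 *	};
 *
 *	static int foo_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *id)
 *	{
 *		const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *
 *		return ata_pci_bmdma_init_one(pdev, ppi, &foo_sht,
 *					      NULL, 0);
 *	}
 */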
3315
3316 #endif /* CONFIG_PCI */
3317 #endif /* CONFIG_ATA_BMDMA */
3318
3319 /**
3320 * ata_sff_port_init - Initialize SFF/BMDMA ATA port
3321 * @ap: Port to initialize
3322 *
3323 * Called on port allocation to initialize SFF/BMDMA specific
3324 * fields.
3325 *
3326 * LOCKING:
3327 * None.
3328 */
3329 void ata_sff_port_init(struct ata_port *ap)
3330 {
3331 INIT_DELAYED_WORK(&ap->sff_pio_task, ata_sff_pio_task);
3332 ap->ctl = ATA_DEVCTL_OBS;
3333 ap->last_ctl = 0xFF;
3334 }
3335
3336 int __init ata_sff_init(void)
3337 {
3338 ata_sff_wq = alloc_workqueue("ata_sff", WQ_MEM_RECLAIM, WQ_MAX_ACTIVE);
3339 if (!ata_sff_wq)
3340 return -ENOMEM;
3341
3342 return 0;
3343 }
3344
3345 void ata_sff_exit(void)
3346 {
3347 destroy_workqueue(ata_sff_wq);
3348 }