Merge upstream kernel changes into 'C/H/S support' branch of libata.
drivers/scsi/libata-core.c
/*
   libata-core.c - helper library for ATA

   Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
   Copyright 2003-2004 Jeff Garzik

   The contents of this file are subject to the Open
   Software License version 1.1 that can be found at
   http://www.opensource.org/licenses/osl-1.1.txt and is included herein
   by reference.

   Alternatively, the contents of this file may be used under the terms
   of the GNU General Public License version 2 (the "GPL") as distributed
   in the kernel source COPYING file, in which case the provisions of
   the GPL are applicable instead of the above.  If you wish to allow
   the use of your version of this file only under the terms of the
   GPL and not to allow others to use your version of this file under
   the OSL, indicate your decision by deleting the provisions above and
   replace them with the notice and other provisions required by the GPL.
   If you do not delete the provisions above, a recipient may use your
   version of this file under either the OSL or the GPL.

 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <scsi/scsi.h>
#include "scsi.h"
#include "scsi_priv.h"
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"

static unsigned int ata_busy_sleep (struct ata_port *ap,
				    unsigned long tmout_pat,
				    unsigned long tmout);
static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev);
static void ata_set_mode(struct ata_port *ap);
static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift);
static int fgb(u32 bitmap);
static int ata_choose_xfer_mode(struct ata_port *ap,
				u8 *xfer_mode_out,
				unsigned int *xfer_shift_out);
static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
static void __ata_qc_complete(struct ata_queued_cmd *qc);

static unsigned int ata_unique_id = 1;
static struct workqueue_struct *ata_wq;

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 *	ata_tf_load_pio - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller using PIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		outb(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		outb(tf->hob_feature, ioaddr->feature_addr);
		outb(tf->hob_nsect, ioaddr->nsect_addr);
		outb(tf->hob_lbal, ioaddr->lbal_addr);
		outb(tf->hob_lbam, ioaddr->lbam_addr);
		outb(tf->hob_lbah, ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		outb(tf->feature, ioaddr->feature_addr);
		outb(tf->nsect, ioaddr->nsect_addr);
		outb(tf->lbal, ioaddr->lbal_addr);
		outb(tf->lbam, ioaddr->lbam_addr);
		outb(tf->lbah, ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		outb(tf->device, ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}

/**
 *	ata_tf_load_mmio - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller using MMIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
		writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
		writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
		writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
		writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
		writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
		writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
		writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
		writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		writeb(tf->device, (void __iomem *) ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}


/**
 *	ata_tf_load - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller using MMIO
 *	or PIO as indicated by the ATA_FLAG_MMIO flag.
 *	Writes the control, feature, nsect, lbal, lbam, and lbah registers.
 *	Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
 *	hob_lbal, hob_lbam, and hob_lbah.
 *
 *	This function waits for idle (!BUSY and !DRQ) after writing
 *	registers.  If the control register has a new value, this
 *	function also waits for idle after writing control and before
 *	writing the remaining registers.
 *
 *	May be used as the tf_load() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
{
	if (ap->flags & ATA_FLAG_MMIO)
		ata_tf_load_mmio(ap, tf);
	else
		ata_tf_load_pio(ap, tf);
}

/**
 *	ata_exec_command_pio - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues PIO write to ATA command register, with proper
 *	synchronization with interrupt handler / other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);

	outb(tf->command, ap->ioaddr.command_addr);
	ata_pause(ap);
}


/**
 *	ata_exec_command_mmio - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues MMIO write to ATA command register, with proper
 *	synchronization with interrupt handler / other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);

	writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
	ata_pause(ap);
}


/**
 *	ata_exec_command - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues PIO/MMIO write to ATA command register, with proper
 *	synchronization with interrupt handler / other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf)
{
	if (ap->flags & ATA_FLAG_MMIO)
		ata_exec_command_mmio(ap, tf);
	else
		ata_exec_command_pio(ap, tf);
}

/**
 *	ata_exec - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues PIO/MMIO write to ATA command register, with proper
 *	synchronization with interrupt handler / other threads.
 *
 *	LOCKING:
 *	Obtains host_set lock.
 */

static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf)
{
	unsigned long flags;

	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->ops->exec_command(ap, tf);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);
}

/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	Obtains host_set lock.
 */

static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);

	ata_exec(ap, tf);
}

/**
 *	ata_tf_to_host_nolock - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}

/**
 *	ata_tf_read_pio - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->nsect = inb(ioaddr->nsect_addr);
	tf->lbal = inb(ioaddr->lbal_addr);
	tf->lbam = inb(ioaddr->lbam_addr);
	tf->lbah = inb(ioaddr->lbah_addr);
	tf->device = inb(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
		tf->hob_feature = inb(ioaddr->error_addr);
		tf->hob_nsect = inb(ioaddr->nsect_addr);
		tf->hob_lbal = inb(ioaddr->lbal_addr);
		tf->hob_lbam = inb(ioaddr->lbam_addr);
		tf->hob_lbah = inb(ioaddr->lbah_addr);
	}
}

/**
 *	ata_tf_read_mmio - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf via MMIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
	tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
	tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
	tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
	tf->device = readb((void __iomem *)ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
		tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
		tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
		tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
		tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
		tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
	}
}


/**
 *	ata_tf_read - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf.
 *
 *	Reads nsect, lbal, lbam, lbah, and device.  If ATA_TFLAG_LBA48
 *	is set, also reads the hob registers.
 *
 *	May be used as the tf_read() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	if (ap->flags & ATA_FLAG_MMIO)
		ata_tf_read_mmio(ap, tf);
	else
		ata_tf_read_pio(ap, tf);
}

/**
 *	ata_check_status_pio - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile status register for currently-selected device
 *	and returns its value.  This also clears pending interrupts
 *	from this device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 ata_check_status_pio(struct ata_port *ap)
{
	return inb(ap->ioaddr.status_addr);
}

/**
 *	ata_check_status_mmio - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile status register for currently-selected device
 *	via MMIO and returns its value.  This also clears pending
 *	interrupts from this device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 ata_check_status_mmio(struct ata_port *ap)
{
	return readb((void __iomem *) ap->ioaddr.status_addr);
}


/**
 *	ata_check_status - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile status register for currently-selected device
 *	and returns its value.  This also clears pending interrupts
 *	from this device.
 *
 *	May be used as the check_status() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_check_status(struct ata_port *ap)
{
	if (ap->flags & ATA_FLAG_MMIO)
		return ata_check_status_mmio(ap);
	return ata_check_status_pio(ap);
}


/**
 *	ata_altstatus - Read device alternate status reg
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile alternate status register for
 *	currently-selected device and returns its value.
 *
 *	Note: may NOT be used as the check_altstatus() entry in
 *	ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_altstatus(struct ata_port *ap)
{
	if (ap->ops->check_altstatus)
		return ap->ops->check_altstatus(ap);

	if (ap->flags & ATA_FLAG_MMIO)
		return readb((void __iomem *)ap->ioaddr.altstatus_addr);
	return inb(ap->ioaddr.altstatus_addr);
}


/**
 *	ata_chk_err - Read device error reg
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile error register for
 *	currently-selected device and returns its value.
 *
 *	Note: may NOT be used as the check_err() entry in
 *	ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_chk_err(struct ata_port *ap)
{
	if (ap->ops->check_err)
		return ap->ops->check_err(ap);

	if (ap->flags & ATA_FLAG_MMIO) {
		return readb((void __iomem *) ap->ioaddr.error_addr);
	}
	return inb(ap->ioaddr.error_addr);
}

/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@fis: Buffer into which data will be output
 *	@pmp: Port multiplier port
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
					    bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}

/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a Serial ATA FIS structure (Register - Device to
 *	Host) back into a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf)
{
	tf->command = fis[2];	/* status */
	tf->feature = fis[3];	/* error */

	tf->lbal = fis[4];
	tf->lbam = fis[5];
	tf->lbah = fis[6];
	tf->device = fis[7];

	tf->hob_lbal = fis[8];
	tf->hob_lbam = fis[9];
	tf->hob_lbah = fis[10];

	tf->nsect = fis[12];
	tf->hob_nsect = fis[13];
}
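
/*
 * Example (illustrative sketch, not called by the driver): round-tripping
 * a taskfile through the two FIS helpers above.  The function name is
 * hypothetical; only the helpers themselves are real.
 */
static inline void example_tf_fis_roundtrip(struct ata_taskfile *tf)
{
	u8 fis[20];
	struct ata_taskfile out;

	ata_tf_to_fis(tf, fis, 0);	/* build H2D FIS for PM port 0 */
	/* fis[0] is now 0x27 (Register - Host to Device), and bit 7 of
	 * fis[1] is set, marking this as a Command FIS.
	 */
	ata_tf_from_fis(fis, &out);	/* recover LBA/count/status fields */
}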

/**
 *	ata_prot_to_cmd - determine which read/write opcodes to use
 *	@protocol: ATA_PROT_xxx taskfile protocol
 *	@lba48: true if lba48 is present
 *
 *	Given necessary input, determine which read/write commands
 *	to use to transfer data.
 *
 *	LOCKING:
 *	None.
 */
static int ata_prot_to_cmd(int protocol, int lba48)
{
	int rcmd = 0, wcmd = 0;

	switch (protocol) {
	case ATA_PROT_PIO:
		if (lba48) {
			rcmd = ATA_CMD_PIO_READ_EXT;
			wcmd = ATA_CMD_PIO_WRITE_EXT;
		} else {
			rcmd = ATA_CMD_PIO_READ;
			wcmd = ATA_CMD_PIO_WRITE;
		}
		break;

	case ATA_PROT_DMA:
		if (lba48) {
			rcmd = ATA_CMD_READ_EXT;
			wcmd = ATA_CMD_WRITE_EXT;
		} else {
			rcmd = ATA_CMD_READ;
			wcmd = ATA_CMD_WRITE;
		}
		break;

	default:
		return -1;
	}

	return rcmd | (wcmd << 8);
}
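
/*
 * Example (illustrative sketch): unpacking the result of
 * ata_prot_to_cmd().  This mirrors what ata_dev_set_protocol() below
 * does when it fills dev->read_cmd and dev->write_cmd; the function
 * name here is hypothetical.
 */
static inline void example_prot_to_cmd(void)
{
	int cmd = ata_prot_to_cmd(ATA_PROT_DMA, 1);	/* LBA48 + DMA */
	u8 read_cmd = cmd & 0xff;		/* == ATA_CMD_READ_EXT */
	u8 write_cmd = (cmd >> 8) & 0xff;	/* == ATA_CMD_WRITE_EXT */

	(void) read_cmd;
	(void) write_cmd;
}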

/**
 *	ata_dev_set_protocol - set taskfile protocol and r/w commands
 *	@dev: device to examine and configure
 *
 *	Examine the device configuration, after we have
 *	read the identify-device page and configured the
 *	data transfer mode.  Set internal state related to
 *	the ATA taskfile protocol (pio, pio mult, dma, etc.)
 *	and calculate the proper read/write commands to use.
 *
 *	LOCKING:
 *	caller.
 */
static void ata_dev_set_protocol(struct ata_device *dev)
{
	int pio = (dev->flags & ATA_DFLAG_PIO);
	int lba48 = (dev->flags & ATA_DFLAG_LBA48);
	int proto, cmd;

	if (pio)
		proto = dev->xfer_protocol = ATA_PROT_PIO;
	else
		proto = dev->xfer_protocol = ATA_PROT_DMA;

	cmd = ata_prot_to_cmd(proto, lba48);
	if (cmd < 0)
		BUG();

	dev->read_cmd = cmd & 0xff;
	dev->write_cmd = (cmd >> 8) & 0xff;
}

static const char * xfer_mode_str[] = {
	"UDMA/16",
	"UDMA/25",
	"UDMA/33",
	"UDMA/44",
	"UDMA/66",
	"UDMA/100",
	"UDMA/133",
	"UDMA7",
	"MWDMA0",
	"MWDMA1",
	"MWDMA2",
	"PIO0",
	"PIO1",
	"PIO2",
	"PIO3",
	"PIO4",
};

/**
 *	ata_mode_string - convert xfer mode bit offset to string
 *	@mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@mask, or the constant C string "<n/a>".
 */

static const char *ata_mode_string(unsigned int mask)
{
	int i;

	for (i = 7; i >= 0; i--)
		if (mask & (1 << i))
			goto out;
	for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--)
		if (mask & (1 << i))
			goto out;
	for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--)
		if (mask & (1 << i))
			goto out;

	return "<n/a>";

out:
	return xfer_mode_str[i];
}
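
/*
 * Example (illustrative): with a UDMA mask of 0x3f (UDMA/16 through
 * UDMA/100 supported), the highest set bit is 5, so ata_mode_string()
 * returns "UDMA/100".  A mask of zero yields "<n/a>".
 */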

/**
 *	ata_pio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_pio_devchk(struct ata_port *ap,
				   unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	outb(0xaa, ioaddr->nsect_addr);
	outb(0x55, ioaddr->lbal_addr);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	nsect = inb(ioaddr->nsect_addr);
	lbal = inb(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_mmio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_mmio_devchk(struct ata_port *ap,
				    unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
	writeb(0x55, (void __iomem *) ioaddr->lbal_addr);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	nsect = readb((void __iomem *) ioaddr->nsect_addr);
	lbal = readb((void __iomem *) ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	Dispatch ATA device presence detection, depending
 *	on whether we are using PIO or MMIO to talk to the
 *	ATA shadow registers.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_devchk(struct ata_port *ap,
			       unsigned int device)
{
	if (ap->flags & ATA_FLAG_MMIO)
		return ata_mmio_devchk(ap, device);
	return ata_pio_devchk(ap, device);
}
842
843 /**
844 * ata_dev_classify - determine device type based on ATA-spec signature
845 * @tf: ATA taskfile register set for device to be identified
846 *
847 * Determine from taskfile register contents whether a device is
848 * ATA or ATAPI, as per "Signature and persistence" section
849 * of ATA/PI spec (volume 1, sect 5.14).
850 *
851 * LOCKING:
852 * None.
853 *
854 * RETURNS:
855 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
856 * the event of failure.
857 */
858
859 unsigned int ata_dev_classify(struct ata_taskfile *tf)
860 {
861 /* Apple's open source Darwin code hints that some devices only
862 * put a proper signature into the LBA mid/high registers,
863 * So, we only check those. It's sufficient for uniqueness.
864 */
865
866 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
867 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
868 DPRINTK("found ATA device by sig\n");
869 return ATA_DEV_ATA;
870 }
871
872 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
873 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
874 DPRINTK("found ATAPI device by sig\n");
875 return ATA_DEV_ATAPI;
876 }
877
878 DPRINTK("unknown device\n");
879 return ATA_DEV_UNKNOWN;
880 }
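
/*
 * Example (illustrative sketch): a typical caller reads the shadow
 * registers after a reset and feeds them to ata_dev_classify(), just as
 * ata_dev_try_classify() below does.  The helper name is hypothetical.
 */
static inline unsigned int example_classify(struct ata_port *ap)
{
	struct ata_taskfile tf;

	ap->ops->tf_read(ap, &tf);
	return ata_dev_classify(&tf);	/* ATA_DEV_ATA/ATAPI/UNKNOWN */
}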

/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 */

static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
{
	struct ata_device *dev = &ap->device[device];
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	err = ata_chk_err(ap);
	ap->ops->tf_read(ap, &tf);

	dev->class = ATA_DEV_NONE;

	/* see if device passed diags */
	if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return err;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);
	if (class == ATA_DEV_UNKNOWN)
		return err;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return err;

	dev->class = class;

	return err;
}

/**
 *	ata_dev_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return; must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_id_string(u16 *id, unsigned char *s,
		       unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}
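
/*
 * Example (illustrative sketch): pulling the 40-character model string
 * out of an IDENTIFY page, as ata_dma_blacklisted() below does.  The
 * helper name is hypothetical; ATA_ID_PROD_OFS is the word offset of
 * the product (model) field.
 */
static inline void example_read_model(struct ata_device *dev,
				      unsigned char *buf)
{
	/* buf must have room for 40 bytes; note the result is
	 * blank-padded, not NUL-terminated.
	 */
	ata_dev_id_string(dev->id, buf, ATA_ID_PROD_OFS, 40);
}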


/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}


/**
 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */

void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
	} else {
		outb(tmp, ap->ioaddr.device_addr);
	}
	ata_pause(ap);	/* needed; also flushes, for mmio */
}
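
#if 0
/*
 * Example (illustrative, compiled out): the helpers in this file that
 * are documented as usable ata_port_operations entries can be wired up
 * along these lines.  A real driver fills in the remaining hooks
 * (phy_reset and friends) as well; the table name is hypothetical.
 */
static struct ata_port_operations example_port_ops = {
	.tf_load	= ata_tf_load,
	.tf_read	= ata_tf_read,
	.exec_command	= ata_exec_command,
	.check_status	= ata_check_status,
	.dev_select	= ata_std_dev_select,
};
#endif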

/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	VPRINTK("ENTER, ata%u: device %u, wait %u\n",
		ap->id, device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}

/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@dev: Device whose IDENTIFY DEVICE page we will dump
 *
 *	Dump selected 16-bit words from a detected device's
 *	IDENTIFY DEVICE page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(struct ata_device *dev)
{
	DPRINTK("49==0x%04x "
		"53==0x%04x "
		"63==0x%04x "
		"64==0x%04x "
		"75==0x%04x \n",
		dev->id[49],
		dev->id[53],
		dev->id[63],
		dev->id[64],
		dev->id[75]);
	DPRINTK("80==0x%04x "
		"81==0x%04x "
		"82==0x%04x "
		"83==0x%04x "
		"84==0x%04x \n",
		dev->id[80],
		dev->id[81],
		dev->id[82],
		dev->id[83],
		dev->id[84]);
	DPRINTK("88==0x%04x "
		"93==0x%04x\n",
		dev->id[88],
		dev->id[93]);
}

/**
 *	ata_dev_identify - obtain IDENTIFY DEVICE page
 *	@ap: port on which device we wish to probe resides
 *	@device: device bus address, starting at zero
 *
 *	Following bus reset, we issue the IDENTIFY [PACKET] DEVICE
 *	command, and read back the 512-byte device information page.
 *	The device information page is fed to us via the standard
 *	PIO-IN protocol, but we hand-code it here.  (TODO: investigate
 *	using standard PIO-IN paths)
 *
 *	After reading the device information page, we use several
 *	bits of information from it to initialize data structures
 *	that will be used during the lifetime of the ata_device.
 *	Other data from the info page is used to disqualify certain
 *	older ATA devices we do not wish to support.
 *
 *	LOCKING:
 *	Inherited from caller.  Some functions called by this function
 *	obtain the host_set lock.
 */

static void ata_dev_identify(struct ata_port *ap, unsigned int device)
{
	struct ata_device *dev = &ap->device[device];
	unsigned int major_version;
	u16 tmp;
	unsigned long xfer_modes;
	u8 status;
	unsigned int using_edd;
	DECLARE_COMPLETION(wait);
	struct ata_queued_cmd *qc;
	unsigned long flags;
	int rc;

	if (!ata_dev_present(dev)) {
		DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
			ap->id, device);
		return;
	}

	if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
		using_edd = 0;
	else
		using_edd = 1;

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);

	assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
		dev->class == ATA_DEV_NONE);

	ata_dev_select(ap, device, 1, 1); /* select device 0/1 */

	qc = ata_qc_new_init(ap, dev);
	BUG_ON(qc == NULL);

	ata_sg_init_one(qc, dev->id, sizeof(dev->id));
	qc->dma_dir = DMA_FROM_DEVICE;
	qc->tf.protocol = ATA_PROT_PIO;
	qc->nsect = 1;

retry:
	if (dev->class == ATA_DEV_ATA) {
		qc->tf.command = ATA_CMD_ID_ATA;
		DPRINTK("do ATA identify\n");
	} else {
		qc->tf.command = ATA_CMD_ID_ATAPI;
		DPRINTK("do ATAPI identify\n");
	}

	qc->waiting = &wait;
	qc->complete_fn = ata_qc_complete_noop;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	rc = ata_qc_issue(qc);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	if (rc)
		goto err_out;
	else
		wait_for_completion(&wait);

	status = ata_chk_status(ap);
	if (status & ATA_ERR) {
		/*
		 * arg!  EDD works for all test cases, but seems to return
		 * the ATA signature for some ATAPI devices.  Until the
		 * reason for this is found and fixed, we fix up the mess
		 * here.  If IDENTIFY DEVICE returns command aborted
		 * (as ATAPI devices do), then we issue an
		 * IDENTIFY PACKET DEVICE.
		 *
		 * ATA software reset (SRST, the default) does not appear
		 * to have this problem.
		 */
		if ((using_edd) && (qc->tf.command == ATA_CMD_ID_ATA)) {
			u8 err = ata_chk_err(ap);
			if (err & ATA_ABORTED) {
				dev->class = ATA_DEV_ATAPI;
				qc->cursg = 0;
				qc->cursg_ofs = 0;
				qc->cursect = 0;
				qc->nsect = 1;
				goto retry;
			}
		}
		goto err_out;
	}

	swap_buf_le16(dev->id, ATA_ID_WORDS);

	/* print device capabilities */
	printk(KERN_DEBUG "ata%u: dev %u cfg "
	       "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
	       ap->id, device, dev->id[49],
	       dev->id[82], dev->id[83], dev->id[84],
	       dev->id[85], dev->id[86], dev->id[87],
	       dev->id[88]);

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* we require DMA support (bit 8 of word 49) */
	if (!ata_id_has_dma(dev->id)) {
		printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
		goto err_out_nosup;
	}

	/* quick-n-dirty find max transfer mode; for printk only */
	xfer_modes = dev->id[ATA_ID_UDMA_MODES];
	if (!xfer_modes)
		xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
	if (!xfer_modes) {
		xfer_modes = (dev->id[ATA_ID_PIO_MODES]) << (ATA_SHIFT_PIO + 3);
		xfer_modes |= (0x7 << ATA_SHIFT_PIO);
	}

	ata_dump_id(dev);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(dev->id))	/* sanity check */
			goto err_out_nosup;

		/* get major version */
		tmp = dev->id[ATA_ID_MAJOR_VER];
		for (major_version = 14; major_version >= 1; major_version--)
			if (tmp & (1 << major_version))
				break;

		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (major_version < 4 || (!ata_id_has_lba(dev->id)))
			ata_dev_init_params(ap, dev);

		if (ata_id_has_lba(dev->id)) {
			dev->flags |= ATA_DFLAG_LBA;

			if (ata_id_has_lba48(dev->id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				dev->n_sectors = ata_id_u64(dev->id, 100);
			} else {
				dev->n_sectors = ata_id_u32(dev->id, 60);
			}

			/* print device info to dmesg */
			printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n",
			       ap->id, device,
			       major_version,
			       ata_mode_string(xfer_modes),
			       (unsigned long long)dev->n_sectors,
			       dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA");
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= dev->id[1];
			dev->heads	= dev->id[3];
			dev->sectors	= dev->id[6];
			dev->n_sectors	= dev->cylinders * dev->heads * dev->sectors;

			if (ata_id_current_chs_valid(dev->id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = dev->id[54];
				dev->heads     = dev->id[55];
				dev->sectors   = dev->id[56];

				dev->n_sectors = ata_id_u32(dev->id, 57);
			}

			/* print device info to dmesg */
			printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n",
			       ap->id, device,
			       major_version,
			       ata_mode_string(xfer_modes),
			       (unsigned long long)dev->n_sectors,
			       (int)dev->cylinders, (int)dev->heads, (int)dev->sectors);

		}

		ap->host->max_cmd_len = 16;
	}

	/* ATAPI-specific feature tests */
	else {
		if (ata_id_is_ata(dev->id))	/* sanity check */
			goto err_out_nosup;

		rc = atapi_cdb_len(dev->id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
			goto err_out_nosup;
		}
		ap->cdb_len = (unsigned int) rc;
		ap->host->max_cmd_len = (unsigned char) ap->cdb_len;

		/* print device info to dmesg */
		printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
		       ap->id, device,
		       ata_mode_string(xfer_modes));
	}

	DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
	return;

err_out_nosup:
	printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
	       ap->id, device);
err_out:
	dev->class++;	/* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
	DPRINTK("EXIT, err\n");
}


static inline u8 ata_dev_knobble(struct ata_port *ap)
{
	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ap->device->id)));
}

/**
 *	ata_dev_config - Run device specific handlers and check for
 *			 SATA->PATA bridges
 *	@ap: Bus
 *	@i:  Device
 *
 *	LOCKING:
 */

void ata_dev_config(struct ata_port *ap, unsigned int i)
{
	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(ap)) {
		printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
		       ap->id, ap->device->devno);
		ap->udma_mask &= ATA_UDMA5;
		ap->host->max_sectors = ATA_MAX_SECTORS;
		ap->host->hostt->max_sectors = ATA_MAX_SECTORS;
		ap->device->flags |= ATA_DFLAG_LOCK_SECTORS;
	}

	if (ap->ops->dev_config)
		ap->ops->dev_config(ap, &ap->device[i]);
}

/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, non-zero on error.
 */

static int ata_bus_probe(struct ata_port *ap)
{
	unsigned int i, found = 0;

	ap->ops->phy_reset(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		goto err_out;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		ata_dev_identify(ap, i);
		if (ata_dev_present(&ap->device[i])) {
			found = 1;
			ata_dev_config(ap, i);
		}
	}

	if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED))
		goto err_out_disable;

	ata_set_mode(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		goto err_out_disable;

	return 0;

err_out_disable:
	ap->ops->port_disable(ap);
err_out:
	return -1;
}

/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host_set lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_PORT_DISABLED;
}
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		scr_write_flush(ap, SCR_CONTROL, 0x301);
		udelay(400);	/* FIXME: a guess */
	}
	scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */

	/* wait for phy to become ready, if necessary */
	do {
		msleep(200);
		sstatus = scr_read(ap, SCR_STATUS);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* TODO: phy layer with polling, timeouts, etc. */
	if (sata_dev_present(ap))
		ata_port_probe(ap);
	else {
		sstatus = scr_read(ap, SCR_STATUS);
		printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
		       ap->id, sstatus);
		ata_port_disable(ap);
	}

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}

/**
 *	sata_phy_reset - Reset SATA bus.
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function resets the SATA bus, and then probes
 *	the bus for devices.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;
	ata_bus_reset(ap);
}

/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host_set lock, or some other form of
 *	serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
	ap->device[0].class = ATA_DEV_NONE;
	ap->device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_PORT_DISABLED;
}

static struct {
	unsigned int shift;
	u8 base;
} xfer_mode_classes[] = {
	{ ATA_SHIFT_UDMA, XFER_UDMA_0 },
	{ ATA_SHIFT_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_PIO, XFER_PIO_0 },
};

static inline u8 base_from_shift(unsigned int shift)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++)
		if (xfer_mode_classes[i].shift == shift)
			return xfer_mode_classes[i].base;

	return 0xff;
}
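
/*
 * Example (illustrative): base_from_shift(ATA_SHIFT_UDMA) returns
 * XFER_UDMA_0, so a UDMA mode mask whose greatest set bit (see fgb()
 * below) is 5 maps to the SET FEATURES code XFER_UDMA_0 + 5, i.e.
 * UDMA/100.  This is exactly how ata_choose_xfer_mode() combines the
 * two helpers.
 */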

static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
{
	int ofs, idx;
	u8 base;

	if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
		return;

	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	ata_dev_set_xfermode(ap, dev);

	base = base_from_shift(dev->xfer_shift);
	ofs = dev->xfer_mode - base;
	idx = ofs + dev->xfer_shift;
	WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str));

	DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n",
		idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs);

	printk(KERN_INFO "ata%u: dev %u configured for %s\n",
	       ap->id, dev->devno, xfer_mode_str[idx]);
}

static int ata_host_set_pio(struct ata_port *ap)
{
	unsigned int mask;
	int x, i;
	u8 base, xfer_mode;

	mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO);
	x = fgb(mask);
	if (x < 0) {
		printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
		return -1;
	}

	base = base_from_shift(ATA_SHIFT_PIO);
	xfer_mode = base + x;

	DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n",
		(int)base, (int)xfer_mode, mask, x);

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		if (ata_dev_present(dev)) {
			dev->pio_mode = xfer_mode;
			dev->xfer_mode = xfer_mode;
			dev->xfer_shift = ATA_SHIFT_PIO;
			if (ap->ops->set_piomode)
				ap->ops->set_piomode(ap, dev);
		}
	}

	return 0;
}

static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
			     unsigned int xfer_shift)
{
	int i;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		if (ata_dev_present(dev)) {
			dev->dma_mode = xfer_mode;
			dev->xfer_mode = xfer_mode;
			dev->xfer_shift = xfer_shift;
			if (ap->ops->set_dmamode)
				ap->ops->set_dmamode(ap, dev);
		}
	}
}

/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
static void ata_set_mode(struct ata_port *ap)
{
	unsigned int i, xfer_shift;
	u8 xfer_mode;
	int rc;

	/* step 1: always set host PIO timings */
	rc = ata_host_set_pio(ap);
	if (rc)
		goto err_out;

	/* step 2: choose the best data xfer mode */
	xfer_mode = xfer_shift = 0;
	rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift);
	if (rc)
		goto err_out;

	/* step 3: if that xfer mode isn't PIO, set host DMA timings */
	if (xfer_shift != ATA_SHIFT_PIO)
		ata_host_set_dma(ap, xfer_mode, xfer_shift);

	/* step 4: update devices' xfer mode */
	ata_dev_set_mode(ap, &ap->device[0]);
	ata_dev_set_mode(ap, &ap->device[1]);

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

	for (i = 0; i < 2; i++) {
		struct ata_device *dev = &ap->device[i];
		ata_dev_set_protocol(dev);
	}

	return;

err_out:
	ata_port_disable(ap);
}

/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING: None.
 *
 */

static unsigned int ata_busy_sleep (struct ata_port *ap,
				    unsigned long tmout_pat,
				    unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status & ATA_BUSY)
		printk(KERN_WARNING "ata%u is slow to respond, "
		       "please be patient\n", ap->id);

	timeout = timer_start + tmout;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status & ATA_BUSY) {
		printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
		       ap->id, tmout / HZ);
		return 1;
	}

	return 0;
}
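
/*
 * Example (illustrative): callers in this file pass a short "impatience"
 * timeout before the hard one, e.g.
 *
 *	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT))
 *		... the device never left BSY; reset/disable the port ...
 *
 * A warning is printed once tmout_pat expires; failure is only declared
 * after tmout.
 */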

static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		if (ap->flags & ATA_FLAG_MMIO) {
			nsect = readb((void __iomem *) ioaddr->nsect_addr);
			lbal = readb((void __iomem *) ioaddr->lbal_addr);
		} else {
			nsect = inb(ioaddr->nsect_addr);
			lbal = inb(ioaddr->lbal_addr);
		}
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			dev1 = 0;
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}

/**
 *	ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command.
 *	@ap: Port to reset and probe
 *
 *	Use the EXECUTE DEVICE DIAGNOSTIC command to reset and
 *	probe the bus.  Not often used these days.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */

static unsigned int ata_bus_edd(struct ata_port *ap)
{
	struct ata_taskfile tf;

	/* set up execute-device-diag (bus reset) taskfile */
	/* also, take interrupts to a known state (disabled) */
	DPRINTK("execute-device-diag\n");
	ata_tf_init(ap, &tf, 0);
	tf.ctl |= ATA_NIEN;
	tf.command = ATA_CMD_EDD;
	tf.protocol = ATA_PROT_NODATA;

	/* do bus reset */
	ata_tf_to_host(ap, &tf);

	/* spec says at least 2ms.  but who knows with those
	 * crazy ATAPI devices...
	 */
	msleep(150);

	return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
}

static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->id);

	/* software reset.  causes dev0 to be selected */
	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	} else {
		outb(ap->ctl, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl, ioaddr->ctl_addr);
	}

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 */
	msleep(150);

	ata_bus_post_reset(ap, devmask);

	return 0;
}

/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host_set lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		rc = ata_bus_softreset(ap, devmask);
	else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
		/* set up device control */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
		rc = ata_bus_edd(ap);
	}

	if (rc)
		goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	err = ata_dev_try_classify(ap, 0);
	if ((slave_possible) && (err != 0x81))
		ata_dev_try_classify(ap, 1);

	/* re-enable interrupts */
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
		ata_irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	printk(KERN_ERR "ata%u: disabling port\n", ap->id);
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}

static void ata_pr_blacklisted(struct ata_port *ap, struct ata_device *dev)
{
	printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n",
	       ap->id, dev->devno);
}

static const char * ata_dma_blacklist [] = {
	"WDC AC11000H",
	"WDC AC22100H",
	"WDC AC32500H",
	"WDC AC33100H",
	"WDC AC31600H",
	"WDC AC32100H",
	"WDC AC23200L",
	"Compaq CRD-8241B",
	"CRD-8400B",
	"CRD-8480B",
	"CRD-8482B",
	"CRD-84",
	"SanDisk SDP3B",
	"SanDisk SDP3B-64",
	"SANYO CD-ROM CRD",
	"HITACHI CDR-8",
	"HITACHI CDR-8335",
	"HITACHI CDR-8435",
	"Toshiba CD-ROM XM-6202B",
	"CD-532E-A",
	"E-IDE CD-ROM CR-840",
	"CD-ROM Drive/F5A",
	"WPI CDD-820",
	"SAMSUNG CD-ROM SC-148C",
	"SAMSUNG CD-ROM SC",
	"SanDisk SDP3B-64",
	"SAMSUNG CD-ROM SN-124",
	"ATAPI CD-ROM DRIVE 40X MAXIMUM",
	"_NEC DV5800A",
};

static int ata_dma_blacklisted(struct ata_port *ap, struct ata_device *dev)
{
	unsigned char model_num[40];
	char *s;
	unsigned int len;
	int i;

	ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
			  sizeof(model_num));
	s = &model_num[0];
	len = strnlen(s, sizeof(model_num));

	/* ATAPI specifies that empty space is blank-filled; remove blanks */
	while ((len > 0) && (s[len - 1] == ' ')) {
		len--;
		s[len] = 0;
	}

	for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++)
		if (!strncmp(ata_dma_blacklist[i], s, len))
			return 1;

	return 0;
}
1997
1998 static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift)
1999 {
2000 struct ata_device *master, *slave;
2001 unsigned int mask;
2002
2003 master = &ap->device[0];
2004 slave = &ap->device[1];
2005
2006 assert (ata_dev_present(master) || ata_dev_present(slave));
2007
2008 if (shift == ATA_SHIFT_UDMA) {
2009 mask = ap->udma_mask;
2010 if (ata_dev_present(master)) {
2011 mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
2012 if (ata_dma_blacklisted(ap, master)) {
2013 mask = 0;
2014 ata_pr_blacklisted(ap, master);
2015 }
2016 }
2017 if (ata_dev_present(slave)) {
2018 mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
2019 if (ata_dma_blacklisted(ap, slave)) {
2020 mask = 0;
2021 ata_pr_blacklisted(ap, slave);
2022 }
2023 }
2024 }
2025 else if (shift == ATA_SHIFT_MWDMA) {
2026 mask = ap->mwdma_mask;
2027 if (ata_dev_present(master)) {
2028 mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
2029 if (ata_dma_blacklisted(ap, master)) {
2030 mask = 0;
2031 ata_pr_blacklisted(ap, master);
2032 }
2033 }
2034 if (ata_dev_present(slave)) {
2035 mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
2036 if (ata_dma_blacklisted(ap, slave)) {
2037 mask = 0;
2038 ata_pr_blacklisted(ap, slave);
2039 }
2040 }
2041 }
2042 else if (shift == ATA_SHIFT_PIO) {
2043 mask = ap->pio_mask;
2044 if (ata_dev_present(master)) {
2045 /* spec doesn't return explicit support for
2046 * PIO0-2, so we fake it
2047 */
2048 u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03;
2049 tmp_mode <<= 3;
2050 tmp_mode |= 0x7;
2051 mask &= tmp_mode;
2052 }
2053 if (ata_dev_present(slave)) {
2054 /* spec doesn't return explicit support for
2055 * PIO0-2, so we fake it
2056 */
2057 u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03;
2058 tmp_mode <<= 3;
2059 tmp_mode |= 0x7;
2060 mask &= tmp_mode;
2061 }
2062 }
2063 else {
2064 mask = 0xffffffff; /* shut up compiler warning */
2065 BUG();
2066 }
2067
2068 return mask;
2069 }
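
/*
 * Worked example of the PIO0-2 fixup above: a drive advertising PIO3
 * and PIO4 reports (id[ATA_ID_PIO_MODES] & 0x03) == 0x03; shifting
 * left by 3 and ORing in 0x7 yields 0x1f, i.e. a mask covering PIO
 * modes 0 through 4.
 */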
2070
2071 /* find greatest bit */
2072 static int fgb(u32 bitmap)
2073 {
2074 unsigned int i;
2075 int x = -1;
2076
2077 for (i = 0; i < 32; i++)
2078 if (bitmap & (1 << i))
2079 x = i;
2080
2081 return x;
2082 }
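
/*
 * For any nonzero bitmap, fgb() behaves like the kernel's fls()
 * helper minus one (e.g. fgb(0x3f) == 5), and it returns -1 for an
 * empty bitmap; the open-coded loop keeps the "highest supported
 * mode" intent obvious.
 */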
2083
2084 /**
2085 * ata_choose_xfer_mode - attempt to find best transfer mode
2086 * @ap: Port for which an xfer mode will be selected
2087 * @xfer_mode_out: (output) SET FEATURES - XFER MODE code
2088 * @xfer_shift_out: (output) bit shift that selects this mode
2089 *
2090 * Based on host and device capabilities, determine the
2091 * maximum transfer mode that is amenable to all.
2092 *
2093 * LOCKING:
2094 * PCI/etc. bus probe sem.
2095 *
2096 * RETURNS:
2097 * Zero on success, negative on error.
2098 */
2099
2100 static int ata_choose_xfer_mode(struct ata_port *ap,
2101 u8 *xfer_mode_out,
2102 unsigned int *xfer_shift_out)
2103 {
2104 unsigned int mask, shift;
2105 int x, i;
2106
2107 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) {
2108 shift = xfer_mode_classes[i].shift;
2109 mask = ata_get_mode_mask(ap, shift);
2110
2111 x = fgb(mask);
2112 if (x >= 0) {
2113 *xfer_mode_out = xfer_mode_classes[i].base + x;
2114 *xfer_shift_out = shift;
2115 return 0;
2116 }
2117 }
2118
2119 return -1;
2120 }
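
/*
 * Worked example, assuming the standard SET FEATURES transfer mode
 * encoding (XFER_UDMA_0 == 0x40): if ata_get_mode_mask() yields 0x3f
 * for the UDMA class, fgb(0x3f) == 5, so *xfer_mode_out becomes
 * 0x40 + 5 == 0x45 (UDMA/5) and *xfer_shift_out is ATA_SHIFT_UDMA.
 */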
2121
2122 /**
2123 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
2124 * @ap: Port associated with device @dev
2125 * @dev: Device to which command will be sent
2126 *
2127 * Issue SET FEATURES - XFER MODE command to device @dev
2128 * on port @ap.
2129 *
2130 * LOCKING:
2131 * PCI/etc. bus probe sem.
2132 */
2133
2134 static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
2135 {
2136 DECLARE_COMPLETION(wait);
2137 struct ata_queued_cmd *qc;
2138 int rc;
2139 unsigned long flags;
2140
2141 /* set up set-features taskfile */
2142 DPRINTK("set features - xfer mode\n");
2143
2144 qc = ata_qc_new_init(ap, dev);
2145 BUG_ON(qc == NULL);
2146
2147 qc->tf.command = ATA_CMD_SET_FEATURES;
2148 qc->tf.feature = SETFEATURES_XFER;
2149 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2150 qc->tf.protocol = ATA_PROT_NODATA;
2151 qc->tf.nsect = dev->xfer_mode;
2152
2153 qc->waiting = &wait;
2154 qc->complete_fn = ata_qc_complete_noop;
2155
2156 spin_lock_irqsave(&ap->host_set->lock, flags);
2157 rc = ata_qc_issue(qc);
2158 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2159
2160 if (rc)
2161 ata_port_disable(ap);
2162 else
2163 wait_for_completion(&wait);
2164
2165 DPRINTK("EXIT\n");
2166 }
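
/*
 * Example: with dev->xfer_mode == 0x45 (UDMA/5 in the encoding noted
 * above), the taskfile issued here carries command SET FEATURES,
 * feature SETFEATURES_XFER, and a sector count of 0x45; the device
 * then switches its interface timing to match.
 */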
2167
2168 /**
2169 * ata_dev_init_params - Issue INIT DEV PARAMS command
2170 * @ap: Port associated with device @dev
2171 * @dev: Device to which command will be sent
2172 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 */
2175
2176 static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev)
2177 {
2178 DECLARE_COMPLETION(wait);
2179 struct ata_queued_cmd *qc;
2180 int rc;
2181 unsigned long flags;
u16 sectors = dev->id[6]; /* IDENTIFY word 6: sectors per track */
u16 heads = dev->id[3]; /* IDENTIFY word 3: number of heads */
2184
2185 /* Number of sectors per track 1-255. Number of heads 1-16 */
2186 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
2187 return;
2188
2189 /* set up init dev params taskfile */
DPRINTK("init dev params\n");
2191
2192 qc = ata_qc_new_init(ap, dev);
2193 BUG_ON(qc == NULL);
2194
2195 qc->tf.command = ATA_CMD_INIT_DEV_PARAMS;
2196 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2197 qc->tf.protocol = ATA_PROT_NODATA;
2198 qc->tf.nsect = sectors;
2199 qc->tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
2200
2201 qc->waiting = &wait;
2202 qc->complete_fn = ata_qc_complete_noop;
2203
2204 spin_lock_irqsave(&ap->host_set->lock, flags);
2205 rc = ata_qc_issue(qc);
2206 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2207
2208 if (rc)
2209 ata_port_disable(ap);
2210 else
2211 wait_for_completion(&wait);
2212
2213 DPRINTK("EXIT\n");
2214 }
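
/*
 * Example: a drive reporting a 16-head, 63-sector translation
 * (dev->id[3] == 16, dev->id[6] == 63) gets nsect == 63 and a device
 * register low nibble of 15 (heads - 1), matching the INITIALIZE
 * DEVICE PARAMETERS command layout.
 */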
2215
2216 /**
2217 * ata_sg_clean - Unmap DMA memory associated with command
2218 * @qc: Command containing DMA memory to be released
2219 *
2220 * Unmap all mapped DMA memory associated with this command.
2221 *
2222 * LOCKING:
2223 * spin_lock_irqsave(host_set lock)
2224 */
2225
2226 static void ata_sg_clean(struct ata_queued_cmd *qc)
2227 {
2228 struct ata_port *ap = qc->ap;
2229 struct scatterlist *sg = qc->sg;
2230 int dir = qc->dma_dir;
2231
2232 assert(qc->flags & ATA_QCFLAG_DMAMAP);
2233 assert(sg != NULL);
2234
2235 if (qc->flags & ATA_QCFLAG_SINGLE)
2236 assert(qc->n_elem == 1);
2237
2238 DPRINTK("unmapping %u sg elements\n", qc->n_elem);
2239
2240 if (qc->flags & ATA_QCFLAG_SG)
2241 dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
2242 else
2243 dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]),
2244 sg_dma_len(&sg[0]), dir);
2245
2246 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2247 qc->sg = NULL;
2248 }
2249
2250 /**
2251 * ata_fill_sg - Fill PCI IDE PRD table
2252 * @qc: Metadata associated with taskfile to be transferred
2253 *
2254 * Fill PCI IDE PRD (scatter-gather) table with segments
2255 * associated with the current disk command.
2256 *
2257 * LOCKING:
2258 * spin_lock_irqsave(host_set lock)
2259 *
2260 */
2261 static void ata_fill_sg(struct ata_queued_cmd *qc)
2262 {
2263 struct scatterlist *sg = qc->sg;
2264 struct ata_port *ap = qc->ap;
2265 unsigned int idx, nelem;
2266
2267 assert(sg != NULL);
2268 assert(qc->n_elem > 0);
2269
2270 idx = 0;
2271 for (nelem = qc->n_elem; nelem; nelem--,sg++) {
2272 u32 addr, offset;
2273 u32 sg_len, len;
2274
2275 /* determine if physical DMA addr spans 64K boundary.
2276 * Note h/w doesn't support 64-bit, so we unconditionally
2277 * truncate dma_addr_t to u32.
2278 */
2279 addr = (u32) sg_dma_address(sg);
2280 sg_len = sg_dma_len(sg);
2281
2282 while (sg_len) {
2283 offset = addr & 0xffff;
2284 len = sg_len;
2285 if ((offset + sg_len) > 0x10000)
2286 len = 0x10000 - offset;
2287
2288 ap->prd[idx].addr = cpu_to_le32(addr);
2289 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2290 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
2291
2292 idx++;
2293 sg_len -= len;
2294 addr += len;
2295 }
2296 }
2297
2298 if (idx)
2299 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2300 }
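
/*
 * Worked example of the 64K split above: a segment at bus address
 * 0x0000f000 with length 0x3000 crosses the 64K mark, so two PRD
 * entries are emitted, (0x0000f000, 0x1000) and (0x00010000, 0x2000);
 * whichever entry is written last has ATA_PRD_EOT set.
 */
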
2301 /**
2302 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2303 * @qc: Metadata associated with taskfile to check
2304 *
2305 * Allow low-level driver to filter ATA PACKET commands, returning
2306 * a status indicating whether or not it is OK to use DMA for the
2307 * supplied PACKET command.
2308 *
2309 * LOCKING:
2310 * spin_lock_irqsave(host_set lock)
2311 *
2312 * RETURNS: 0 when ATAPI DMA can be used
2313 * nonzero otherwise
2314 */
2315 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2316 {
2317 struct ata_port *ap = qc->ap;
2318 int rc = 0; /* Assume ATAPI DMA is OK by default */
2319
2320 if (ap->ops->check_atapi_dma)
2321 rc = ap->ops->check_atapi_dma(qc);
2322
2323 return rc;
2324 }

/**
2326 * ata_qc_prep - Prepare taskfile for submission
2327 * @qc: Metadata associated with taskfile to be prepared
2328 *
2329 * Prepare ATA taskfile for submission.
2330 *
2331 * LOCKING:
2332 * spin_lock_irqsave(host_set lock)
2333 */
2334 void ata_qc_prep(struct ata_queued_cmd *qc)
2335 {
2336 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2337 return;
2338
2339 ata_fill_sg(qc);
2340 }
2341
/**
 * ata_sg_init_one - Associate command with memory buffer
 * @qc: Command to be associated
 * @buf: Memory buffer
 * @buflen: Length of memory buffer, in bytes.
 *
 * Initialize the data-related elements of queued_cmd @qc to point
 * to a single-entry scatter-gather list covering the memory buffer
 * @buf of byte length @buflen.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
2368 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2369 {
2370 struct scatterlist *sg;
2371
2372 qc->flags |= ATA_QCFLAG_SINGLE;
2373
2374 memset(&qc->sgent, 0, sizeof(qc->sgent));
2375 qc->sg = &qc->sgent;
2376 qc->n_elem = 1;
2377 qc->buf_virt = buf;
2378
2379 sg = qc->sg;
2380 sg->page = virt_to_page(buf);
2381 sg->offset = (unsigned long) buf & ~PAGE_MASK;
2382 sg->length = buflen;
2383 }
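
/*
 * Usage sketch: callers transferring a single kernel buffer (see
 * atapi_request_sense() later in this file) do
 *
 *	ata_sg_init_one(qc, buf, buflen);
 *	qc->dma_dir = DMA_FROM_DEVICE;
 *
 * and ata_sg_setup_one() performs the actual DMA mapping at issue
 * time.
 */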
2384
/**
 * ata_sg_init - Associate command with scatter-gather table.
 * @qc: Command to be associated
 * @sg: Scatter-gather table.
 * @n_elem: Number of elements in s/g table.
 *
 * Initialize the data-related elements of queued_cmd @qc to point
 * to a scatter-gather table @sg, containing @n_elem elements.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
2410
2411 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2412 unsigned int n_elem)
2413 {
2414 qc->flags |= ATA_QCFLAG_SG;
2415 qc->sg = sg;
2416 qc->n_elem = n_elem;
2417 }
2418
2419 /**
2420 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
2421 * @qc: Command with memory buffer to be mapped.
2422 *
2423 * DMA-map the memory buffer associated with queued_cmd @qc.
2424 *
2425 * LOCKING:
2426 * spin_lock_irqsave(host_set lock)
2427 *
2428 * RETURNS:
2429 * Zero on success, negative on error.
2430 */
2431
2432 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2433 {
2434 struct ata_port *ap = qc->ap;
2435 int dir = qc->dma_dir;
2436 struct scatterlist *sg = qc->sg;
2437 dma_addr_t dma_address;
2438
2439 dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
2440 sg->length, dir);
2441 if (dma_mapping_error(dma_address))
2442 return -1;
2443
2444 sg_dma_address(sg) = dma_address;
2445 sg_dma_len(sg) = sg->length;
2446
2447 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
2448 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2449
2450 return 0;
2451 }
2452
2453 /**
2454 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
2455 * @qc: Command with scatter-gather table to be mapped.
2456 *
2457 * DMA-map the scatter-gather table associated with queued_cmd @qc.
2458 *
2459 * LOCKING:
2460 * spin_lock_irqsave(host_set lock)
2461 *
2462 * RETURNS:
2463 * Zero on success, negative on error.
2464 *
2465 */
2466
2467 static int ata_sg_setup(struct ata_queued_cmd *qc)
2468 {
2469 struct ata_port *ap = qc->ap;
2470 struct scatterlist *sg = qc->sg;
2471 int n_elem, dir;
2472
2473 VPRINTK("ENTER, ata%u\n", ap->id);
2474 assert(qc->flags & ATA_QCFLAG_SG);
2475
2476 dir = qc->dma_dir;
2477 n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir);
2478 if (n_elem < 1)
2479 return -1;
2480
2481 DPRINTK("%d sg elements mapped\n", n_elem);
2482
2483 qc->n_elem = n_elem;
2484
2485 return 0;
2486 }
2487
/**
 * ata_pio_poll - poll the device status during a PIO command
 * @ap: port on which the PIO command is executing
 *
 * Check whether the device has dropped BSY; if not, either flag a
 * timeout or arrange to be polled again after a short pause.
 *
 * LOCKING:
 * None. (executing in kernel thread context)
 *
 * RETURNS:
 * Delay in jiffies before the next poll, or zero when the state
 * machine should run again without delay.
 */
2498
2499 static unsigned long ata_pio_poll(struct ata_port *ap)
2500 {
2501 u8 status;
2502 unsigned int poll_state = PIO_ST_UNKNOWN;
2503 unsigned int reg_state = PIO_ST_UNKNOWN;
2504 const unsigned int tmout_state = PIO_ST_TMOUT;
2505
2506 switch (ap->pio_task_state) {
2507 case PIO_ST:
2508 case PIO_ST_POLL:
2509 poll_state = PIO_ST_POLL;
2510 reg_state = PIO_ST;
2511 break;
2512 case PIO_ST_LAST:
2513 case PIO_ST_LAST_POLL:
2514 poll_state = PIO_ST_LAST_POLL;
2515 reg_state = PIO_ST_LAST;
2516 break;
2517 default:
2518 BUG();
2519 break;
2520 }
2521
2522 status = ata_chk_status(ap);
2523 if (status & ATA_BUSY) {
2524 if (time_after(jiffies, ap->pio_task_timeout)) {
2525 ap->pio_task_state = tmout_state;
2526 return 0;
2527 }
2528 ap->pio_task_state = poll_state;
2529 return ATA_SHORT_PAUSE;
2530 }
2531
2532 ap->pio_task_state = reg_state;
2533 return 0;
2534 }
2535
/**
 * ata_pio_complete - finish a PIO command once the device goes idle
 * @ap: port on which the PIO command is executing
 *
 * LOCKING:
 * None. (executing in kernel thread context)
 */
2543
2544 static void ata_pio_complete (struct ata_port *ap)
2545 {
2546 struct ata_queued_cmd *qc;
2547 u8 drv_stat;
2548
2549 /*
 * This is purely heuristic. This is a fast path.
2551 * Sometimes when we enter, BSY will be cleared in
2552 * a chk-status or two. If not, the drive is probably seeking
2553 * or something. Snooze for a couple msecs, then
2554 * chk-status again. If still busy, fall back to
2555 * PIO_ST_POLL state.
2556 */
2557 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2558 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2559 msleep(2);
2560 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2561 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2562 ap->pio_task_state = PIO_ST_LAST_POLL;
2563 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2564 return;
2565 }
2566 }
2567
2568 drv_stat = ata_wait_idle(ap);
2569 if (!ata_ok(drv_stat)) {
2570 ap->pio_task_state = PIO_ST_ERR;
2571 return;
2572 }
2573
2574 qc = ata_qc_from_tag(ap, ap->active_tag);
2575 assert(qc != NULL);
2576
2577 ap->pio_task_state = PIO_ST_IDLE;
2578
2579 ata_irq_on(ap);
2580
2581 ata_qc_complete(qc, drv_stat);
2582 }
2583
2584
/**
 * swap_buf_le16 - swap halves of 16-bit words in place
 * @buf: Buffer to swap
 * @buf_words: Number of 16-bit words in buffer.
 *
 * Swap halves of 16-bit words if needed to convert from
 * little-endian byte order to native cpu byte order, or
 * vice-versa.
 *
 * LOCKING:
 * Inherited from caller.
 */
2596 void swap_buf_le16(u16 *buf, unsigned int buf_words)
2597 {
2598 #ifdef __BIG_ENDIAN
2599 unsigned int i;
2600
2601 for (i = 0; i < buf_words; i++)
2602 buf[i] = le16_to_cpu(buf[i]);
2603 #endif /* __BIG_ENDIAN */
2604 }
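
/*
 * Example: IDENTIFY DEVICE data arrives as 256 little-endian 16-bit
 * words, so a caller on a big-endian machine can run
 * swap_buf_le16(dev->id, 256) once before reading fields such as
 * id[ATA_ID_UDMA_MODES]; on little-endian builds the function body
 * is empty.
 */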
2605
2606 static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
2607 unsigned int buflen, int write_data)
2608 {
2609 unsigned int i;
2610 unsigned int words = buflen >> 1;
2611 u16 *buf16 = (u16 *) buf;
2612 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
2613
2614 if (write_data) {
2615 for (i = 0; i < words; i++)
2616 writew(le16_to_cpu(buf16[i]), mmio);
2617 } else {
2618 for (i = 0; i < words; i++)
2619 buf16[i] = cpu_to_le16(readw(mmio));
2620 }
2621 }
2622
2623 static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
2624 unsigned int buflen, int write_data)
2625 {
unsigned int words = buflen >> 1; /* outsw/insw move 16-bit words, not dwords */

if (write_data)
outsw(ap->ioaddr.data_addr, buf, words);
else
insw(ap->ioaddr.data_addr, buf, words);
2632 }
2633
2634 static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
2635 unsigned int buflen, int do_write)
2636 {
2637 if (ap->flags & ATA_FLAG_MMIO)
2638 ata_mmio_data_xfer(ap, buf, buflen, do_write);
2639 else
2640 ata_pio_data_xfer(ap, buf, buflen, do_write);
2641 }
2642
2643 static void ata_pio_sector(struct ata_queued_cmd *qc)
2644 {
2645 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2646 struct scatterlist *sg = qc->sg;
2647 struct ata_port *ap = qc->ap;
2648 struct page *page;
2649 unsigned int offset;
2650 unsigned char *buf;
2651
2652 if (qc->cursect == (qc->nsect - 1))
2653 ap->pio_task_state = PIO_ST_LAST;
2654
2655 page = sg[qc->cursg].page;
2656 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
2657
2658 /* get the current page and offset */
2659 page = nth_page(page, (offset >> PAGE_SHIFT));
2660 offset %= PAGE_SIZE;
2661
2662 buf = kmap(page) + offset;
2663
2664 qc->cursect++;
2665 qc->cursg_ofs++;
2666
2667 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
2668 qc->cursg++;
2669 qc->cursg_ofs = 0;
2670 }
2671
2672 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2673
2674 /* do the actual data transfer */
2675 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2676 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
2677
2678 kunmap(page);
2679 }
2680
2681 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
2682 {
2683 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2684 struct scatterlist *sg = qc->sg;
2685 struct ata_port *ap = qc->ap;
2686 struct page *page;
2687 unsigned char *buf;
2688 unsigned int offset, count;
2689
2690 if (qc->curbytes == qc->nbytes - bytes)
2691 ap->pio_task_state = PIO_ST_LAST;
2692
2693 next_sg:
2694 sg = &qc->sg[qc->cursg];
2695
2696 page = sg->page;
2697 offset = sg->offset + qc->cursg_ofs;
2698
2699 /* get the current page and offset */
2700 page = nth_page(page, (offset >> PAGE_SHIFT));
2701 offset %= PAGE_SIZE;
2702
2703 /* don't overrun current sg */
2704 count = min(sg->length - qc->cursg_ofs, bytes);
2705
2706 /* don't cross page boundaries */
2707 count = min(count, (unsigned int)PAGE_SIZE - offset);
2708
2709 buf = kmap(page) + offset;
2710
2711 bytes -= count;
2712 qc->curbytes += count;
2713 qc->cursg_ofs += count;
2714
2715 if (qc->cursg_ofs == sg->length) {
2716 qc->cursg++;
2717 qc->cursg_ofs = 0;
2718 }
2719
2720 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2721
2722 /* do the actual data transfer */
2723 ata_data_xfer(ap, buf, count, do_write);
2724
2725 kunmap(page);
2726
2727 if (bytes) {
2728 goto next_sg;
2729 }
2730 }
2731
2732 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
2733 {
2734 struct ata_port *ap = qc->ap;
2735 struct ata_device *dev = qc->dev;
2736 unsigned int ireason, bc_lo, bc_hi, bytes;
2737 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
2738
2739 ap->ops->tf_read(ap, &qc->tf);
2740 ireason = qc->tf.nsect;
2741 bc_lo = qc->tf.lbam;
2742 bc_hi = qc->tf.lbah;
2743 bytes = (bc_hi << 8) | bc_lo;
2744
2745 /* shall be cleared to zero, indicating xfer of data */
2746 if (ireason & (1 << 0))
2747 goto err_out;
2748
2749 /* make sure transfer direction matches expected */
2750 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
2751 if (do_write != i_write)
2752 goto err_out;
2753
2754 __atapi_pio_bytes(qc, bytes);
2755
2756 return;
2757
2758 err_out:
2759 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
2760 ap->id, dev->devno);
2761 ap->pio_task_state = PIO_ST_ERR;
2762 }
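
/*
 * The interrupt reason checks above follow the ATAPI PACKET protocol:
 * bit 0 of ireason (CoD) must be 0 for a data phase, and bit 1 (IO)
 * is 0 for host-to-device transfers. E.g. ireason == 0x02 together
 * with do_write == 0 is a valid read-data phase.
 */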
2763
/**
 * ata_pio_block - transfer the next block of a PIO command
 * @ap: port on which the PIO command is executing
 *
 * LOCKING:
 * None. (executing in kernel thread context)
 */
2771
2772 static void ata_pio_block(struct ata_port *ap)
2773 {
2774 struct ata_queued_cmd *qc;
2775 u8 status;
2776
2777 /*
 * This is purely heuristic. This is a fast path.
2779 * Sometimes when we enter, BSY will be cleared in
2780 * a chk-status or two. If not, the drive is probably seeking
2781 * or something. Snooze for a couple msecs, then
2782 * chk-status again. If still busy, fall back to
2783 * PIO_ST_POLL state.
2784 */
2785 status = ata_busy_wait(ap, ATA_BUSY, 5);
2786 if (status & ATA_BUSY) {
2787 msleep(2);
2788 status = ata_busy_wait(ap, ATA_BUSY, 10);
2789 if (status & ATA_BUSY) {
2790 ap->pio_task_state = PIO_ST_POLL;
2791 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2792 return;
2793 }
2794 }
2795
2796 qc = ata_qc_from_tag(ap, ap->active_tag);
2797 assert(qc != NULL);
2798
2799 if (is_atapi_taskfile(&qc->tf)) {
2800 /* no more data to transfer or unsupported ATAPI command */
2801 if ((status & ATA_DRQ) == 0) {
2802 ap->pio_task_state = PIO_ST_IDLE;
2803
2804 ata_irq_on(ap);
2805
2806 ata_qc_complete(qc, status);
2807 return;
2808 }
2809
2810 atapi_pio_bytes(qc);
2811 } else {
2812 /* handle BSY=0, DRQ=0 as error */
2813 if ((status & ATA_DRQ) == 0) {
2814 ap->pio_task_state = PIO_ST_ERR;
2815 return;
2816 }
2817
2818 ata_pio_sector(qc);
2819 }
2820 }
2821
2822 static void ata_pio_error(struct ata_port *ap)
2823 {
2824 struct ata_queued_cmd *qc;
2825 u8 drv_stat;
2826
2827 qc = ata_qc_from_tag(ap, ap->active_tag);
2828 assert(qc != NULL);
2829
2830 drv_stat = ata_chk_status(ap);
2831 printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n",
2832 ap->id, drv_stat);
2833
2834 ap->pio_task_state = PIO_ST_IDLE;
2835
2836 ata_irq_on(ap);
2837
2838 ata_qc_complete(qc, drv_stat | ATA_ERR);
2839 }
2840
2841 static void ata_pio_task(void *_data)
2842 {
2843 struct ata_port *ap = _data;
2844 unsigned long timeout = 0;
2845
2846 switch (ap->pio_task_state) {
2847 case PIO_ST_IDLE:
2848 return;
2849
2850 case PIO_ST:
2851 ata_pio_block(ap);
2852 break;
2853
2854 case PIO_ST_LAST:
2855 ata_pio_complete(ap);
2856 break;
2857
2858 case PIO_ST_POLL:
2859 case PIO_ST_LAST_POLL:
2860 timeout = ata_pio_poll(ap);
2861 break;
2862
2863 case PIO_ST_TMOUT:
2864 case PIO_ST_ERR:
2865 ata_pio_error(ap);
2866 return;
2867 }
2868
2869 if (timeout)
2870 queue_delayed_work(ata_wq, &ap->pio_task,
2871 timeout);
2872 else
2873 queue_work(ata_wq, &ap->pio_task);
2874 }
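
/*
 * Summary of the PIO state machine driven above: PIO_ST moves data
 * blocks via ata_pio_block(), PIO_ST_LAST waits for final command
 * status in ata_pio_complete(), the *_POLL states snooze while BSY
 * stays set, and PIO_ST_TMOUT/PIO_ST_ERR terminate the command
 * through ata_pio_error().
 */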
2875
2876 static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
2877 struct scsi_cmnd *cmd)
2878 {
2879 DECLARE_COMPLETION(wait);
2880 struct ata_queued_cmd *qc;
2881 unsigned long flags;
2882 int rc;
2883
2884 DPRINTK("ATAPI request sense\n");
2885
2886 qc = ata_qc_new_init(ap, dev);
2887 BUG_ON(qc == NULL);
2888
2889 /* FIXME: is this needed? */
2890 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
2891
2892 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
2893 qc->dma_dir = DMA_FROM_DEVICE;
2894
2895 memset(&qc->cdb, 0, ap->cdb_len);
2896 qc->cdb[0] = REQUEST_SENSE;
2897 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2898
2899 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2900 qc->tf.command = ATA_CMD_PACKET;
2901
2902 qc->tf.protocol = ATA_PROT_ATAPI;
2903 qc->tf.lbam = (8 * 1024) & 0xff;
2904 qc->tf.lbah = (8 * 1024) >> 8;
2905 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
2906
2907 qc->waiting = &wait;
2908 qc->complete_fn = ata_qc_complete_noop;
2909
2910 spin_lock_irqsave(&ap->host_set->lock, flags);
2911 rc = ata_qc_issue(qc);
2912 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2913
2914 if (rc)
2915 ata_port_disable(ap);
2916 else
2917 wait_for_completion(&wait);
2918
2919 DPRINTK("EXIT\n");
2920 }
2921
2922 /**
2923 * ata_qc_timeout - Handle timeout of queued command
2924 * @qc: Command that timed out
2925 *
2926 * Some part of the kernel (currently, only the SCSI layer)
2927 * has noticed that the active command on port @ap has not
2928 * completed after a specified length of time. Handle this
2929 * condition by disabling DMA (if necessary) and completing
2930 * transactions, with error if necessary.
2931 *
2932 * This also handles the case of the "lost interrupt", where
2933 * for some reason (possibly hardware bug, possibly driver bug)
2934 * an interrupt was not delivered to the driver, even though the
2935 * transaction completed successfully.
2936 *
2937 * LOCKING:
2938 * Inherited from SCSI layer (none, can sleep)
2939 */
2940
2941 static void ata_qc_timeout(struct ata_queued_cmd *qc)
2942 {
2943 struct ata_port *ap = qc->ap;
2944 struct ata_device *dev = qc->dev;
2945 u8 host_stat = 0, drv_stat;
2946
2947 DPRINTK("ENTER\n");
2948
2949 /* FIXME: doesn't this conflict with timeout handling? */
2950 if (qc->dev->class == ATA_DEV_ATAPI && qc->scsicmd) {
2951 struct scsi_cmnd *cmd = qc->scsicmd;
2952
2953 if (!scsi_eh_eflags_chk(cmd, SCSI_EH_CANCEL_CMD)) {
2954
2955 /* finish completing original command */
2956 __ata_qc_complete(qc);
2957
2958 atapi_request_sense(ap, dev, cmd);
2959
2960 cmd->result = (CHECK_CONDITION << 1) | (DID_OK << 16);
2961 scsi_finish_command(cmd);
2962
2963 goto out;
2964 }
2965 }
2966
2967 /* hack alert! We cannot use the supplied completion
2968 * function from inside the ->eh_strategy_handler() thread.
2969 * libata is the only user of ->eh_strategy_handler() in
2970 * any kernel, so the default scsi_done() assumes it is
2971 * not being called from the SCSI EH.
2972 */
2973 qc->scsidone = scsi_finish_command;
2974
2975 switch (qc->tf.protocol) {
2976
2977 case ATA_PROT_DMA:
2978 case ATA_PROT_ATAPI_DMA:
2979 host_stat = ap->ops->bmdma_status(ap);
2980
2981 /* before we do anything else, clear DMA-Start bit */
2982 ap->ops->bmdma_stop(ap);
2983
2984 /* fall through */
2985
2986 default:
2987 ata_altstatus(ap);
2988 drv_stat = ata_chk_status(ap);
2989
2990 /* ack bmdma irq events */
2991 ap->ops->irq_clear(ap);
2992
2993 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
2994 ap->id, qc->tf.command, drv_stat, host_stat);
2995
2996 /* complete taskfile transaction */
2997 ata_qc_complete(qc, drv_stat);
2998 break;
2999 }
3000 out:
3001 DPRINTK("EXIT\n");
3002 }
3003
3004 /**
3005 * ata_eng_timeout - Handle timeout of queued command
3006 * @ap: Port on which timed-out command is active
3007 *
3008 * Some part of the kernel (currently, only the SCSI layer)
3009 * has noticed that the active command on port @ap has not
3010 * completed after a specified length of time. Handle this
3011 * condition by disabling DMA (if necessary) and completing
3012 * transactions, with error if necessary.
3013 *
3014 * This also handles the case of the "lost interrupt", where
3015 * for some reason (possibly hardware bug, possibly driver bug)
3016 * an interrupt was not delivered to the driver, even though the
3017 * transaction completed successfully.
3018 *
3019 * LOCKING:
3020 * Inherited from SCSI layer (none, can sleep)
3021 */
3022
3023 void ata_eng_timeout(struct ata_port *ap)
3024 {
3025 struct ata_queued_cmd *qc;
3026
3027 DPRINTK("ENTER\n");
3028
3029 qc = ata_qc_from_tag(ap, ap->active_tag);
3030 if (!qc) {
3031 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
3032 ap->id);
3033 goto out;
3034 }
3035
3036 ata_qc_timeout(qc);
3037
3038 out:
3039 DPRINTK("EXIT\n");
3040 }
3041
3042 /**
3043 * ata_qc_new - Request an available ATA command, for queueing
 * @ap: Port from which a free command slot is requested
3046 *
3047 * LOCKING:
3048 * None.
3049 */
3050
3051 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
3052 {
3053 struct ata_queued_cmd *qc = NULL;
3054 unsigned int i;
3055
3056 for (i = 0; i < ATA_MAX_QUEUE; i++)
3057 if (!test_and_set_bit(i, &ap->qactive)) {
3058 qc = ata_qc_from_tag(ap, i);
3059 break;
3060 }
3061
3062 if (qc)
3063 qc->tag = i;
3064
3065 return qc;
3066 }
3067
3068 /**
3069 * ata_qc_new_init - Request an available ATA command, and initialize it
3070 * @ap: Port associated with device @dev
3071 * @dev: Device from whom we request an available command structure
3072 *
3073 * LOCKING:
3074 * None.
3075 */
3076
3077 struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
3078 struct ata_device *dev)
3079 {
3080 struct ata_queued_cmd *qc;
3081
3082 qc = ata_qc_new(ap);
3083 if (qc) {
3084 qc->sg = NULL;
3085 qc->flags = 0;
3086 qc->scsicmd = NULL;
3087 qc->ap = ap;
3088 qc->dev = dev;
3089 qc->cursect = qc->cursg = qc->cursg_ofs = 0;
3090 qc->nsect = 0;
3091 qc->nbytes = qc->curbytes = 0;
3092
3093 ata_tf_init(ap, &qc->tf, dev->devno);
3094
3095 if (dev->flags & ATA_DFLAG_LBA) {
3096 qc->tf.flags |= ATA_TFLAG_LBA;
3097
3098 if (dev->flags & ATA_DFLAG_LBA48)
3099 qc->tf.flags |= ATA_TFLAG_LBA48;
3100 }
3101 }
3102
3103 return qc;
3104 }
3105
3106 static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat)
3107 {
3108 return 0;
3109 }
3110
3111 static void __ata_qc_complete(struct ata_queued_cmd *qc)
3112 {
3113 struct ata_port *ap = qc->ap;
3114 unsigned int tag, do_clear = 0;
3115
3116 qc->flags = 0;
3117 tag = qc->tag;
3118 if (likely(ata_tag_valid(tag))) {
3119 if (tag == ap->active_tag)
3120 ap->active_tag = ATA_TAG_POISON;
3121 qc->tag = ATA_TAG_POISON;
3122 do_clear = 1;
3123 }
3124
3125 if (qc->waiting) {
3126 struct completion *waiting = qc->waiting;
3127 qc->waiting = NULL;
3128 complete(waiting);
3129 }
3130
3131 if (likely(do_clear))
3132 clear_bit(tag, &ap->qactive);
3133 }
3134
3135 /**
3136 * ata_qc_free - free unused ata_queued_cmd
3137 * @qc: Command to complete
3138 *
3139 * Designed to free unused ata_queued_cmd object
3140 * in case something prevents using it.
3141 *
3142 * LOCKING:
3143 * spin_lock_irqsave(host_set lock)
3144 *
3145 */
3146 void ata_qc_free(struct ata_queued_cmd *qc)
3147 {
3148 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
3149 assert(qc->waiting == NULL); /* nothing should be waiting */
3150
3151 __ata_qc_complete(qc);
3152 }
3153
3154 /**
3155 * ata_qc_complete - Complete an active ATA command
3156 * @qc: Command to complete
3157 * @drv_stat: ATA Status register contents
3158 *
3159 * Indicate to the mid and upper layers that an ATA
3160 * command has completed, with either an ok or not-ok status.
3161 *
3162 * LOCKING:
3163 * spin_lock_irqsave(host_set lock)
3164 *
3165 */
3166
3167 void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
3168 {
3169 int rc;
3170
3171 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
3172 assert(qc->flags & ATA_QCFLAG_ACTIVE);
3173
3174 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
3175 ata_sg_clean(qc);
3176
3177 /* call completion callback */
3178 rc = qc->complete_fn(qc, drv_stat);
3179 qc->flags &= ~ATA_QCFLAG_ACTIVE;
3180
3181 /* if callback indicates not to complete command (non-zero),
3182 * return immediately
3183 */
3184 if (rc != 0)
3185 return;
3186
3187 __ata_qc_complete(qc);
3188
3189 VPRINTK("EXIT\n");
3190 }
3191
3192 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
3193 {
3194 struct ata_port *ap = qc->ap;
3195
3196 switch (qc->tf.protocol) {
3197 case ATA_PROT_DMA:
3198 case ATA_PROT_ATAPI_DMA:
3199 return 1;
3200
3201 case ATA_PROT_ATAPI:
3202 case ATA_PROT_PIO:
3203 case ATA_PROT_PIO_MULT:
3204 if (ap->flags & ATA_FLAG_PIO_DMA)
3205 return 1;
3206
3207 /* fall through */
3208
3209 default:
3210 return 0;
3211 }
3212
3213 /* never reached */
3214 }
3215
3216 /**
3217 * ata_qc_issue - issue taskfile to device
3218 * @qc: command to issue to device
3219 *
 * Prepare an ATA command for submission to a device.
3221 * This includes mapping the data into a DMA-able
3222 * area, filling in the S/G table, and finally
3223 * writing the taskfile to hardware, starting the command.
3224 *
3225 * LOCKING:
3226 * spin_lock_irqsave(host_set lock)
3227 *
3228 * RETURNS:
3229 * Zero on success, negative on error.
3230 */
3231
3232 int ata_qc_issue(struct ata_queued_cmd *qc)
3233 {
3234 struct ata_port *ap = qc->ap;
3235
3236 if (ata_should_dma_map(qc)) {
3237 if (qc->flags & ATA_QCFLAG_SG) {
3238 if (ata_sg_setup(qc))
3239 goto err_out;
3240 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
3241 if (ata_sg_setup_one(qc))
3242 goto err_out;
3243 }
3244 } else {
3245 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3246 }
3247
3248 ap->ops->qc_prep(qc);
3249
3250 qc->ap->active_tag = qc->tag;
3251 qc->flags |= ATA_QCFLAG_ACTIVE;
3252
3253 return ap->ops->qc_issue(qc);
3254
3255 err_out:
3256 return -1;
3257 }
3258
3259
3260 /**
3261 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
3262 * @qc: command to issue to device
3263 *
3264 * Using various libata functions and hooks, this function
3265 * starts an ATA command. ATA commands are grouped into
3266 * classes called "protocols", and issuing each type of protocol
3267 * is slightly different.
3268 *
3269 * May be used as the qc_issue() entry in ata_port_operations.
3270 *
3271 * LOCKING:
3272 * spin_lock_irqsave(host_set lock)
3273 *
3274 * RETURNS:
3275 * Zero on success, negative on error.
3276 */
3277
3278 int ata_qc_issue_prot(struct ata_queued_cmd *qc)
3279 {
3280 struct ata_port *ap = qc->ap;
3281
3282 ata_dev_select(ap, qc->dev->devno, 1, 0);
3283
3284 switch (qc->tf.protocol) {
3285 case ATA_PROT_NODATA:
3286 ata_tf_to_host_nolock(ap, &qc->tf);
3287 break;
3288
3289 case ATA_PROT_DMA:
3290 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3291 ap->ops->bmdma_setup(qc); /* set up bmdma */
3292 ap->ops->bmdma_start(qc); /* initiate bmdma */
3293 break;
3294
3295 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
3296 ata_qc_set_polling(qc);
3297 ata_tf_to_host_nolock(ap, &qc->tf);
3298 ap->pio_task_state = PIO_ST;
3299 queue_work(ata_wq, &ap->pio_task);
3300 break;
3301
3302 case ATA_PROT_ATAPI:
3303 ata_qc_set_polling(qc);
3304 ata_tf_to_host_nolock(ap, &qc->tf);
3305 queue_work(ata_wq, &ap->packet_task);
3306 break;
3307
3308 case ATA_PROT_ATAPI_NODATA:
3309 ata_tf_to_host_nolock(ap, &qc->tf);
3310 queue_work(ata_wq, &ap->packet_task);
3311 break;
3312
3313 case ATA_PROT_ATAPI_DMA:
3314 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
3315 ap->ops->bmdma_setup(qc); /* set up bmdma */
3316 queue_work(ata_wq, &ap->packet_task);
3317 break;
3318
3319 default:
3320 WARN_ON(1);
3321 return -1;
3322 }
3323
3324 return 0;
3325 }
3326
3327 /**
3328 * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
3329 * @qc: Info associated with this ATA transaction.
3330 *
3331 * LOCKING:
3332 * spin_lock_irqsave(host_set lock)
3333 */
3334
3335 static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
3336 {
3337 struct ata_port *ap = qc->ap;
3338 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
3339 u8 dmactl;
3340 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3341
3342 /* load PRD table addr. */
3343 mb(); /* make sure PRD table writes are visible to controller */
3344 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
3345
3346 /* specify data direction, triple-check start bit is clear */
3347 dmactl = readb(mmio + ATA_DMA_CMD);
3348 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
3349 if (!rw)
3350 dmactl |= ATA_DMA_WR;
3351 writeb(dmactl, mmio + ATA_DMA_CMD);
3352
3353 /* issue r/w command */
3354 ap->ops->exec_command(ap, &qc->tf);
3355 }
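
/*
 * For reference, the SFF-8038i BMDMA register block used above is
 * laid out per channel as: command register at offset 0
 * (ATA_DMA_CMD), status register at offset 2 (ATA_DMA_STATUS), and
 * the 32-bit PRD table pointer at offset 4 (ATA_DMA_TABLE_OFS).
 */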
3356
3357 /**
3358 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
3359 * @qc: Info associated with this ATA transaction.
3360 *
3361 * LOCKING:
3362 * spin_lock_irqsave(host_set lock)
3363 */
3364
3365 static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
3366 {
3367 struct ata_port *ap = qc->ap;
3368 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3369 u8 dmactl;
3370
3371 /* start host DMA transaction */
3372 dmactl = readb(mmio + ATA_DMA_CMD);
3373 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
3374
3375 /* Strictly, one may wish to issue a readb() here, to
3376 * flush the mmio write. However, control also passes
3377 * to the hardware at this point, and it will interrupt
3378 * us when we are to resume control. So, in effect,
3379 * we don't care when the mmio write flushes.
3380 * Further, a read of the DMA status register _immediately_
 * following the write may not be what certain flaky hardware
 * expects, so I think it is best not to add a readb()
 * without first testing all the MMIO ATA cards/mobos.
3384 * Or maybe I'm just being paranoid.
3385 */
3386 }
3387
3388 /**
3389 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
3390 * @qc: Info associated with this ATA transaction.
3391 *
3392 * LOCKING:
3393 * spin_lock_irqsave(host_set lock)
3394 */
3395
3396 static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
3397 {
3398 struct ata_port *ap = qc->ap;
3399 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
3400 u8 dmactl;
3401
3402 /* load PRD table addr. */
3403 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
3404
3405 /* specify data direction, triple-check start bit is clear */
3406 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3407 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
3408 if (!rw)
3409 dmactl |= ATA_DMA_WR;
3410 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3411
3412 /* issue r/w command */
3413 ap->ops->exec_command(ap, &qc->tf);
3414 }
3415
3416 /**
3417 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
3418 * @qc: Info associated with this ATA transaction.
3419 *
3420 * LOCKING:
3421 * spin_lock_irqsave(host_set lock)
3422 */
3423
3424 static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
3425 {
3426 struct ata_port *ap = qc->ap;
3427 u8 dmactl;
3428
3429 /* start host DMA transaction */
3430 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3431 outb(dmactl | ATA_DMA_START,
3432 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3433 }
3434
3435
3436 /**
3437 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
3438 * @qc: Info associated with this ATA transaction.
3439 *
3440 * Writes the ATA_DMA_START flag to the DMA command register.
3441 *
3442 * May be used as the bmdma_start() entry in ata_port_operations.
3443 *
3444 * LOCKING:
3445 * spin_lock_irqsave(host_set lock)
3446 */
3447 void ata_bmdma_start(struct ata_queued_cmd *qc)
3448 {
3449 if (qc->ap->flags & ATA_FLAG_MMIO)
3450 ata_bmdma_start_mmio(qc);
3451 else
3452 ata_bmdma_start_pio(qc);
3453 }
3454
3455
3456 /**
3457 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
3458 * @qc: Info associated with this ATA transaction.
3459 *
3460 * Writes address of PRD table to device's PRD Table Address
3461 * register, sets the DMA control register, and calls
3462 * ops->exec_command() to start the transfer.
3463 *
3464 * May be used as the bmdma_setup() entry in ata_port_operations.
3465 *
3466 * LOCKING:
3467 * spin_lock_irqsave(host_set lock)
3468 */
3469 void ata_bmdma_setup(struct ata_queued_cmd *qc)
3470 {
3471 if (qc->ap->flags & ATA_FLAG_MMIO)
3472 ata_bmdma_setup_mmio(qc);
3473 else
3474 ata_bmdma_setup_pio(qc);
3475 }
3476
3477
3478 /**
3479 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
3480 * @ap: Port associated with this ATA transaction.
3481 *
3482 * Clear interrupt and error flags in DMA status register.
3483 *
3484 * May be used as the irq_clear() entry in ata_port_operations.
3485 *
3486 * LOCKING:
3487 * spin_lock_irqsave(host_set lock)
3488 */
3489
3490 void ata_bmdma_irq_clear(struct ata_port *ap)
3491 {
3492 if (ap->flags & ATA_FLAG_MMIO) {
3493 void __iomem *mmio = ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
3494 writeb(readb(mmio), mmio);
3495 } else {
3496 unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
3497 outb(inb(addr), addr);
3498 }
3499
3500 }
3501
3502
3503 /**
3504 * ata_bmdma_status - Read PCI IDE BMDMA status
3505 * @ap: Port associated with this ATA transaction.
3506 *
3507 * Read and return BMDMA status register.
3508 *
3509 * May be used as the bmdma_status() entry in ata_port_operations.
3510 *
3511 * LOCKING:
3512 * spin_lock_irqsave(host_set lock)
3513 */
3514
3515 u8 ata_bmdma_status(struct ata_port *ap)
3516 {
3517 u8 host_stat;
3518 if (ap->flags & ATA_FLAG_MMIO) {
3519 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3520 host_stat = readb(mmio + ATA_DMA_STATUS);
3521 } else
3522 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
3523 return host_stat;
3524 }
3525
3526
3527 /**
3528 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
3529 * @ap: Port associated with this ATA transaction.
3530 *
3531 * Clears the ATA_DMA_START flag in the dma control register
3532 *
3533 * May be used as the bmdma_stop() entry in ata_port_operations.
3534 *
3535 * LOCKING:
3536 * spin_lock_irqsave(host_set lock)
3537 */
3538
3539 void ata_bmdma_stop(struct ata_port *ap)
3540 {
3541 if (ap->flags & ATA_FLAG_MMIO) {
3542 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3543
3544 /* clear start/stop bit */
3545 writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
3546 mmio + ATA_DMA_CMD);
3547 } else {
3548 /* clear start/stop bit */
3549 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
3550 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3551 }
3552
3553 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
3554 ata_altstatus(ap); /* dummy read */
3555 }
3556
3557 /**
3558 * ata_host_intr - Handle host interrupt for given (port, task)
3559 * @ap: Port on which interrupt arrived (possibly...)
3560 * @qc: Taskfile currently active in engine
3561 *
3562 * Handle host interrupt for given queued command. Currently,
3563 * only DMA interrupts are handled. All other commands are
3564 * handled via polling with interrupts disabled (nIEN bit).
3565 *
3566 * LOCKING:
3567 * spin_lock_irqsave(host_set lock)
3568 *
3569 * RETURNS:
3570 * One if interrupt was handled, zero if not (shared irq).
3571 */
3572
3573 inline unsigned int ata_host_intr (struct ata_port *ap,
3574 struct ata_queued_cmd *qc)
3575 {
3576 u8 status, host_stat;
3577
3578 switch (qc->tf.protocol) {
3579
3580 case ATA_PROT_DMA:
3581 case ATA_PROT_ATAPI_DMA:
3582 case ATA_PROT_ATAPI:
3583 /* check status of DMA engine */
3584 host_stat = ap->ops->bmdma_status(ap);
3585 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
3586
3587 /* if it's not our irq... */
3588 if (!(host_stat & ATA_DMA_INTR))
3589 goto idle_irq;
3590
3591 /* before we do anything else, clear DMA-Start bit */
3592 ap->ops->bmdma_stop(ap);
3593
3594 /* fall through */
3595
3596 case ATA_PROT_ATAPI_NODATA:
3597 case ATA_PROT_NODATA:
3598 /* check altstatus */
3599 status = ata_altstatus(ap);
3600 if (status & ATA_BUSY)
3601 goto idle_irq;
3602
3603 /* check main status, clearing INTRQ */
3604 status = ata_chk_status(ap);
3605 if (unlikely(status & ATA_BUSY))
3606 goto idle_irq;
3607 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
3608 ap->id, qc->tf.protocol, status);
3609
3610 /* ack bmdma irq events */
3611 ap->ops->irq_clear(ap);
3612
3613 /* complete taskfile transaction */
3614 ata_qc_complete(qc, status);
3615 break;
3616
3617 default:
3618 goto idle_irq;
3619 }
3620
3621 return 1; /* irq handled */
3622
3623 idle_irq:
3624 ap->stats.idle_irq++;
3625
3626 #ifdef ATA_IRQ_TRAP
3627 if ((ap->stats.idle_irq % 1000) == 0) {
ata_irq_ack(ap, 0); /* debug trap */
printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
return 1; /* count the trap as handled; the old 'handled = 1' referenced an undeclared local */
}
3632 #endif
3633 return 0; /* irq not handled */
3634 }
3635
3636 /**
3637 * ata_interrupt - Default ATA host interrupt handler
3638 * @irq: irq line (unused)
3639 * @dev_instance: pointer to our ata_host_set information structure
3640 * @regs: unused
3641 *
3642 * Default interrupt handler for PCI IDE devices. Calls
3643 * ata_host_intr() for each port that is not disabled.
3644 *
3645 * LOCKING:
3646 * Obtains host_set lock during operation.
3647 *
3648 * RETURNS:
3649 * IRQ_NONE or IRQ_HANDLED.
3650 *
3651 */
3652
3653 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
3654 {
3655 struct ata_host_set *host_set = dev_instance;
3656 unsigned int i;
3657 unsigned int handled = 0;
3658 unsigned long flags;
3659
3660 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
3661 spin_lock_irqsave(&host_set->lock, flags);
3662
3663 for (i = 0; i < host_set->n_ports; i++) {
3664 struct ata_port *ap;
3665
3666 ap = host_set->ports[i];
3667 if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
3668 struct ata_queued_cmd *qc;
3669
3670 qc = ata_qc_from_tag(ap, ap->active_tag);
3671 if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
3672 (qc->flags & ATA_QCFLAG_ACTIVE))
3673 handled |= ata_host_intr(ap, qc);
3674 }
3675 }
3676
3677 spin_unlock_irqrestore(&host_set->lock, flags);
3678
3679 return IRQ_RETVAL(handled);
3680 }
3681
3682 /**
3683 * atapi_packet_task - Write CDB bytes to hardware
3684 * @_data: Port to which ATAPI device is attached.
3685 *
3686 * When device has indicated its readiness to accept
3687 * a CDB, this function is called. Send the CDB.
3688 * If DMA is to be performed, exit immediately.
3689 * Otherwise, we are in polling mode, so poll
 * status until the operation succeeds or fails.
3691 *
3692 * LOCKING:
3693 * Kernel thread context (may sleep)
3694 */
3695
3696 static void atapi_packet_task(void *_data)
3697 {
3698 struct ata_port *ap = _data;
3699 struct ata_queued_cmd *qc;
3700 u8 status;
3701
3702 qc = ata_qc_from_tag(ap, ap->active_tag);
3703 assert(qc != NULL);
3704 assert(qc->flags & ATA_QCFLAG_ACTIVE);
3705
3706 /* sleep-wait for BSY to clear */
3707 DPRINTK("busy wait\n");
3708 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB))
3709 goto err_out;
3710
3711 /* make sure DRQ is set */
3712 status = ata_chk_status(ap);
3713 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)
3714 goto err_out;
3715
3716 /* send SCSI cdb */
3717 DPRINTK("send cdb\n");
3718 assert(ap->cdb_len >= 12);
3719 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
3720
3721 /* if we are DMA'ing, irq handler takes over from here */
3722 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3723 ap->ops->bmdma_start(qc); /* initiate bmdma */
3724
3725 /* non-data commands are also handled via irq */
3726 else if (qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3727 /* do nothing */
3728 }
3729
3730 /* PIO commands are handled by polling */
3731 else {
3732 ap->pio_task_state = PIO_ST;
3733 queue_work(ata_wq, &ap->pio_task);
3734 }
3735
3736 return;
3737
3738 err_out:
3739 ata_qc_complete(qc, ATA_ERR);
3740 }
3741
3742
3743 /**
3744 * ata_port_start - Set port up for dma.
3745 * @ap: Port to initialize
3746 *
3747 * Called just after data structures for each port are
3748 * initialized. Allocates space for PRD table.
3749 *
3750 * May be used as the port_start() entry in ata_port_operations.
3751 *
 * LOCKING:
 * Inherited from caller.
 */
3754
3755 int ata_port_start (struct ata_port *ap)
3756 {
3757 struct device *dev = ap->host_set->dev;
3758
3759 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
3760 if (!ap->prd)
3761 return -ENOMEM;
3762
3763 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
3764
3765 return 0;
3766 }
3767
3768
3769 /**
3770 * ata_port_stop - Undo ata_port_start()
3771 * @ap: Port to shut down
3772 *
3773 * Frees the PRD table.
3774 *
3775 * May be used as the port_stop() entry in ata_port_operations.
3776 *
 * LOCKING:
 * Inherited from caller.
 */
3779
3780 void ata_port_stop (struct ata_port *ap)
3781 {
3782 struct device *dev = ap->host_set->dev;
3783
3784 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
3785 }
3786
3787 void ata_host_stop (struct ata_host_set *host_set)
3788 {
3789 if (host_set->mmio_base)
3790 iounmap(host_set->mmio_base);
3791 }
3792
3793
3794 /**
3795 * ata_host_remove - Unregister SCSI host structure with upper layers
3796 * @ap: Port to unregister
3797 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
3798 *
 * LOCKING:
 * Inherited from caller.
 */
3801
3802 static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
3803 {
3804 struct Scsi_Host *sh = ap->host;
3805
3806 DPRINTK("ENTER\n");
3807
3808 if (do_unregister)
3809 scsi_remove_host(sh);
3810
3811 ap->ops->port_stop(ap);
3812 }
3813
3814 /**
3815 * ata_host_init - Initialize an ata_port structure
3816 * @ap: Structure to initialize
3817 * @host: associated SCSI mid-layer structure
3818 * @host_set: Collection of hosts to which @ap belongs
3819 * @ent: Probe information provided by low-level driver
3820 * @port_no: Port number associated with this ata_port
3821 *
3822 * Initialize a new ata_port structure, and its associated
3823 * scsi_host.
3824 *
3825 * LOCKING:
3826 * Inherited from caller.
3827 *
3828 */
3829
3830 static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
3831 struct ata_host_set *host_set,
3832 struct ata_probe_ent *ent, unsigned int port_no)
3833 {
3834 unsigned int i;
3835
3836 host->max_id = 16;
3837 host->max_lun = 1;
3838 host->max_channel = 1;
3839 host->unique_id = ata_unique_id++;
3840 host->max_cmd_len = 12;
3841 scsi_set_device(host, ent->dev);
3842 scsi_assign_lock(host, &host_set->lock);
3843
3844 ap->flags = ATA_FLAG_PORT_DISABLED;
3845 ap->id = host->unique_id;
3846 ap->host = host;
3847 ap->ctl = ATA_DEVCTL_OBS;
3848 ap->host_set = host_set;
3849 ap->port_no = port_no;
3850 ap->hard_port_no =
3851 ent->legacy_mode ? ent->hard_port_no : port_no;
3852 ap->pio_mask = ent->pio_mask;
3853 ap->mwdma_mask = ent->mwdma_mask;
3854 ap->udma_mask = ent->udma_mask;
3855 ap->flags |= ent->host_flags;
3856 ap->ops = ent->port_ops;
3857 ap->cbl = ATA_CBL_NONE;
3858 ap->active_tag = ATA_TAG_POISON;
3859 ap->last_ctl = 0xFF;
3860
3861 INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
3862 INIT_WORK(&ap->pio_task, ata_pio_task, ap);
3863
3864 for (i = 0; i < ATA_MAX_DEVICES; i++)
3865 ap->device[i].devno = i;
3866
3867 #ifdef ATA_IRQ_TRAP
3868 ap->stats.unhandled_irq = 1;
3869 ap->stats.idle_irq = 1;
3870 #endif
3871
3872 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
3873 }
3874
3875 /**
3876 * ata_host_add - Attach low-level ATA driver to system
3877 * @ent: Information provided by low-level driver
3878 * @host_set: Collections of ports to which we add
3879 * @port_no: Port number associated with this host
3880 *
3881 * Attach low-level ATA driver to system.
3882 *
3883 * LOCKING:
3884 * PCI/etc. bus probe sem.
3885 *
3886 * RETURNS:
 * New ata_port on success, or NULL on error.
3888 *
3889 */
3890
3891 static struct ata_port * ata_host_add(struct ata_probe_ent *ent,
3892 struct ata_host_set *host_set,
3893 unsigned int port_no)
3894 {
3895 struct Scsi_Host *host;
3896 struct ata_port *ap;
3897 int rc;
3898
3899 DPRINTK("ENTER\n");
3900 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
3901 if (!host)
3902 return NULL;
3903
3904 ap = (struct ata_port *) &host->hostdata[0];
3905
3906 ata_host_init(ap, host, host_set, ent, port_no);
3907
3908 rc = ap->ops->port_start(ap);
3909 if (rc)
3910 goto err_out;
3911
3912 return ap;
3913
3914 err_out:
3915 scsi_host_put(host);
3916 return NULL;
3917 }
3918
3919 /**
3920 * ata_device_add - Register hardware device with ATA and SCSI layers
3921 * @ent: Probe information describing hardware device to be registered
3922 *
3923 * This function processes the information provided in the probe
3924 * information struct @ent, allocates the necessary ATA and SCSI
3925 * host information structures, initializes them, and registers
3926 * everything with requisite kernel subsystems.
3927 *
3928 * This function requests irqs, probes the ATA bus, and probes
3929 * the SCSI bus.
3930 *
3931 * LOCKING:
3932 * PCI/etc. bus probe sem.
3933 *
3934 * RETURNS:
3935 * Number of ports registered. Zero on error (no ports registered).
3936 *
3937 */
3938
3939 int ata_device_add(struct ata_probe_ent *ent)
3940 {
3941 unsigned int count = 0, i;
3942 struct device *dev = ent->dev;
3943 struct ata_host_set *host_set;
3944
3945 DPRINTK("ENTER\n");
3946 /* alloc a container for our list of ATA ports (buses) */
3947 host_set = kmalloc(sizeof(struct ata_host_set) +
3948 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
3949 if (!host_set)
3950 return 0;
3951 memset(host_set, 0, sizeof(struct ata_host_set) + (ent->n_ports * sizeof(void *)));
3952 spin_lock_init(&host_set->lock);
3953
3954 host_set->dev = dev;
3955 host_set->n_ports = ent->n_ports;
3956 host_set->irq = ent->irq;
3957 host_set->mmio_base = ent->mmio_base;
3958 host_set->private_data = ent->private_data;
3959 host_set->ops = ent->port_ops;
3960
3961 /* register each port bound to this device */
3962 for (i = 0; i < ent->n_ports; i++) {
3963 struct ata_port *ap;
3964 unsigned long xfer_mode_mask;
3965
3966 ap = ata_host_add(ent, host_set, i);
3967 if (!ap)
3968 goto err_out;
3969
3970 host_set->ports[i] = ap;
xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
3972 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
3973 (ap->pio_mask << ATA_SHIFT_PIO);
3974
3975 /* print per-port info to dmesg */
3976 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
3977 "bmdma 0x%lX irq %lu\n",
3978 ap->id,
3979 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
3980 ata_mode_string(xfer_mode_mask),
3981 ap->ioaddr.cmd_addr,
3982 ap->ioaddr.ctl_addr,
3983 ap->ioaddr.bmdma_addr,
3984 ent->irq);
3985
3986 ata_chk_status(ap);
3987 host_set->ops->irq_clear(ap);
3988 count++;
3989 }
3990
3991 if (!count) {
3992 kfree(host_set);
3993 return 0;
3994 }
3995
3996 /* obtain irq, that is shared between channels */
3997 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
3998 DRV_NAME, host_set))
3999 goto err_out;
4000
4001 /* perform each probe synchronously */
4002 DPRINTK("probe begin\n");
4003 for (i = 0; i < count; i++) {
4004 struct ata_port *ap;
4005 int rc;
4006
4007 ap = host_set->ports[i];
4008
4009 DPRINTK("ata%u: probe begin\n", ap->id);
4010 rc = ata_bus_probe(ap);
4011 DPRINTK("ata%u: probe end\n", ap->id);
4012
4013 if (rc) {
4014 /* FIXME: do something useful here?
4015 * Current libata behavior will
4016 * tear down everything when
4017 * the module is removed
4018 * or the h/w is unplugged.
4019 */
4020 }
4021
4022 rc = scsi_add_host(ap->host, dev);
4023 if (rc) {
4024 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
4025 ap->id);
4026 /* FIXME: do something useful here */
4027 /* FIXME: handle unconditional calls to
4028 * scsi_scan_host and ata_host_remove, below,
4029 * at the very least
4030 */
4031 }
4032 }
4033
4034 /* probes are done, now scan each port's disk(s) */
4035 DPRINTK("probe begin\n");
4036 for (i = 0; i < count; i++) {
4037 struct ata_port *ap = host_set->ports[i];
4038
4039 scsi_scan_host(ap->host);
4040 }
4041
4042 dev_set_drvdata(dev, host_set);
4043
4044 VPRINTK("EXIT, returning %u\n", ent->n_ports);
4045 return ent->n_ports; /* success */
4046
4047 err_out:
4048 for (i = 0; i < count; i++) {
4049 ata_host_remove(host_set->ports[i], 1);
4050 scsi_host_put(host_set->ports[i]->host);
4051 }
4052 kfree(host_set);
4053 VPRINTK("EXIT, returning 0\n");
4054 return 0;
4055 }
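
/*
 * Usage sketch (hypothetical low-level driver): build an
 * ata_probe_ent (for PCI parts, e.g. via ata_pci_init_native_mode()
 * below), then register it:
 *
 *	struct ata_probe_ent *ent = ...;
 *	if (ata_device_add(ent) == 0)
 *		return -ENODEV;
 *	kfree(ent);
 *
 * ata_device_add() returns the number of ports registered, or zero
 * on failure; the probe_ent is the caller's to free afterwards.
 */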
4056
4057 /**
4058 * ata_scsi_release - SCSI layer callback hook for host unload
4059 * @host: libata host to be unloaded
4060 *
4061 * Performs all duties necessary to shut down a libata port...
4062 * Kill port kthread, disable port, and release resources.
4063 *
4064 * LOCKING:
4065 * Inherited from SCSI layer.
4066 *
4067 * RETURNS:
4068 * One.
4069 */
4070
4071 int ata_scsi_release(struct Scsi_Host *host)
4072 {
4073 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
4074
4075 DPRINTK("ENTER\n");
4076
4077 ap->ops->port_disable(ap);
4078 ata_host_remove(ap, 0);
4079
4080 DPRINTK("EXIT\n");
4081 return 1;
4082 }
4083
4084 /**
4085 * ata_std_ports - initialize ioaddr with standard port offsets.
4086 * @ioaddr: IO address structure to be initialized
4087 *
4088 * Utility function which initializes data_addr, error_addr,
4089 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
4090 * device_addr, status_addr, and command_addr to standard offsets
4091 * relative to cmd_addr.
4092 *
4093 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
4094 */
4095
4096 void ata_std_ports(struct ata_ioports *ioaddr)
4097 {
4098 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
4099 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
4100 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
4101 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
4102 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
4103 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
4104 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
4105 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
4106 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
4107 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
4108 }
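
/*
 * Usage sketch: a low-level driver fills in ioaddr->cmd_addr (plus
 * ctl_addr/altstatus_addr and bmdma_addr) from its own resources and
 * then calls ata_std_ports() to derive the per-register addresses,
 * exactly as ata_pci_init_native_mode() does below.
 */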
4109
4110 static struct ata_probe_ent *
4111 ata_probe_ent_alloc(struct device *dev, struct ata_port_info *port)
4112 {
4113 struct ata_probe_ent *probe_ent;
4114
4115 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
4116 if (!probe_ent) {
4117 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
4118 kobject_name(&(dev->kobj)));
4119 return NULL;
4120 }
4121
4122 memset(probe_ent, 0, sizeof(*probe_ent));
4123
4124 INIT_LIST_HEAD(&probe_ent->node);
4125 probe_ent->dev = dev;
4126
4127 probe_ent->sht = port->sht;
4128 probe_ent->host_flags = port->host_flags;
4129 probe_ent->pio_mask = port->pio_mask;
4130 probe_ent->mwdma_mask = port->mwdma_mask;
4131 probe_ent->udma_mask = port->udma_mask;
4132 probe_ent->port_ops = port->port_ops;
4133
4134 return probe_ent;
4135 }
4136
4137
4138
4139 /**
4140 * ata_pci_init_native_mode - Initialize native-mode driver
4141 * @pdev: pci device to be initialized
4142 * @port: array[2] of pointers to port info structures.
4143 *
4144 * Utility function which allocates and initializes an
4145 * ata_probe_ent structure for a standard dual-port
4146  *	PIO-based IDE controller.  The returned ata_probe_ent
4147  *	structure can be passed to ata_device_add(), after which
4148  *	it should be freed with kfree().
4149 */
4150
4151 #ifdef CONFIG_PCI
4152 struct ata_probe_ent *
4153 ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port)
4154 {
4155 struct ata_probe_ent *probe_ent =
4156 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
4157 if (!probe_ent)
4158 return NULL;
4159
4160 probe_ent->n_ports = 2;
4161 probe_ent->irq = pdev->irq;
4162 probe_ent->irq_flags = SA_SHIRQ;
4163
4164 probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0);
4165 probe_ent->port[0].altstatus_addr =
4166 probe_ent->port[0].ctl_addr =
4167 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
4168 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
4169
4170 probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2);
4171 probe_ent->port[1].altstatus_addr =
4172 probe_ent->port[1].ctl_addr =
4173 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
4174 probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8;
4175
4176 ata_std_ports(&probe_ent->port[0]);
4177 ata_std_ports(&probe_ent->port[1]);
4178
4179 return probe_ent;
4180 }
4181
4182 static struct ata_probe_ent *
4183 ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port,
4184 struct ata_probe_ent **ppe2)
4185 {
4186 struct ata_probe_ent *probe_ent, *probe_ent2;
4187
4188 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
4189 if (!probe_ent)
4190 return NULL;
4191 probe_ent2 = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[1]);
4192 if (!probe_ent2) {
4193 kfree(probe_ent);
4194 return NULL;
4195 }
4196
4197 probe_ent->n_ports = 1;
4198 probe_ent->irq = 14;
4199
4200 probe_ent->hard_port_no = 0;
4201 probe_ent->legacy_mode = 1;
4202
4203 probe_ent2->n_ports = 1;
4204 probe_ent2->irq = 15;
4205
4206 probe_ent2->hard_port_no = 1;
4207 probe_ent2->legacy_mode = 1;
4208
4209 probe_ent->port[0].cmd_addr = 0x1f0;
4210 probe_ent->port[0].altstatus_addr =
4211 probe_ent->port[0].ctl_addr = 0x3f6;
4212 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
4213
4214 probe_ent2->port[0].cmd_addr = 0x170;
4215 probe_ent2->port[0].altstatus_addr =
4216 probe_ent2->port[0].ctl_addr = 0x376;
4217 	probe_ent2->port[0].bmdma_addr = pci_resource_start(pdev, 4) + 8;
4218
4219 ata_std_ports(&probe_ent->port[0]);
4220 ata_std_ports(&probe_ent2->port[0]);
4221
4222 *ppe2 = probe_ent2;
4223 return probe_ent;
4224 }
4225
4226 /**
4227 * ata_pci_init_one - Initialize/register PCI IDE host controller
4228 * @pdev: Controller to be initialized
4229 * @port_info: Information from low-level host driver
4230 * @n_ports: Number of ports attached to host controller
4231 *
4232 * This is a helper function which can be called from a driver's
4233 * xxx_init_one() probe function if the hardware uses traditional
4234 * IDE taskfile registers.
4235 *
4236 * This function calls pci_enable_device(), reserves its register
4237  *	regions, sets the DMA mask, enables bus master mode, and calls
4238  *	ata_device_add().
4239 *
4240 * LOCKING:
4241 * Inherited from PCI layer (may sleep).
4242 *
4243 * RETURNS:
4244  *	Zero on success, negative errno-based value on error.
4245 *
4246 */
4247
4248 int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
4249 unsigned int n_ports)
4250 {
4251 struct ata_probe_ent *probe_ent, *probe_ent2 = NULL;
4252 struct ata_port_info *port[2];
4253 u8 tmp8, mask;
4254 unsigned int legacy_mode = 0;
4255 int disable_dev_on_err = 1;
4256 int rc;
4257
4258 DPRINTK("ENTER\n");
4259
4260 port[0] = port_info[0];
4261 if (n_ports > 1)
4262 port[1] = port_info[1];
4263 else
4264 port[1] = port[0];
4265
4266 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
4267 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
4268 /* TODO: support transitioning to native mode? */
4269 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
4270 mask = (1 << 2) | (1 << 0);
4271 if ((tmp8 & mask) != mask)
4272 legacy_mode = (1 << 3);
4273 }
4274
4275 /* FIXME... */
4276 if ((!legacy_mode) && (n_ports > 1)) {
4277 printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n");
4278 return -EINVAL;
4279 }
4280
4281 rc = pci_enable_device(pdev);
4282 if (rc)
4283 return rc;
4284
4285 rc = pci_request_regions(pdev, DRV_NAME);
4286 if (rc) {
4287 disable_dev_on_err = 0;
4288 goto err_out;
4289 }
4290
4291 if (legacy_mode) {
4292 if (!request_region(0x1f0, 8, "libata")) {
4293 struct resource *conflict, res;
4294 res.start = 0x1f0;
4295 res.end = 0x1f0 + 8 - 1;
4296 conflict = ____request_resource(&ioport_resource, &res);
4297 if (!strcmp(conflict->name, "libata"))
4298 legacy_mode |= (1 << 0);
4299 else {
4300 disable_dev_on_err = 0;
4301 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
4302 }
4303 } else
4304 legacy_mode |= (1 << 0);
4305
4306 if (!request_region(0x170, 8, "libata")) {
4307 struct resource *conflict, res;
4308 res.start = 0x170;
4309 res.end = 0x170 + 8 - 1;
4310 conflict = ____request_resource(&ioport_resource, &res);
4311 if (!strcmp(conflict->name, "libata"))
4312 legacy_mode |= (1 << 1);
4313 else {
4314 disable_dev_on_err = 0;
4315 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
4316 }
4317 } else
4318 legacy_mode |= (1 << 1);
4319 }
4320
4321 /* we have legacy mode, but all ports are unavailable */
4322 if (legacy_mode == (1 << 3)) {
4323 rc = -EBUSY;
4324 goto err_out_regions;
4325 }
4326
4327 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
4328 if (rc)
4329 goto err_out_regions;
4330 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
4331 if (rc)
4332 goto err_out_regions;
4333
4334 	if (legacy_mode)
4335 		probe_ent = ata_pci_init_legacy_mode(pdev, port, &probe_ent2);
4336 	else
4337 probe_ent = ata_pci_init_native_mode(pdev, port);
4338 if (!probe_ent) {
4339 rc = -ENOMEM;
4340 goto err_out_regions;
4341 }
4342
4343 pci_set_master(pdev);
4344
4345 /* FIXME: check ata_device_add return */
4346 if (legacy_mode) {
4347 if (legacy_mode & (1 << 0))
4348 ata_device_add(probe_ent);
4349 if (legacy_mode & (1 << 1))
4350 ata_device_add(probe_ent2);
4351 } else
4352 ata_device_add(probe_ent);
4353
4354 kfree(probe_ent);
4355 kfree(probe_ent2);
4356
4357 return 0;
4358
4359 err_out_regions:
4360 if (legacy_mode & (1 << 0))
4361 release_region(0x1f0, 8);
4362 if (legacy_mode & (1 << 1))
4363 release_region(0x170, 8);
4364 pci_release_regions(pdev);
4365 err_out:
4366 if (disable_dev_on_err)
4367 pci_disable_device(pdev);
4368 return rc;
4369 }
4370
4371 /**
4372 * ata_pci_remove_one - PCI layer callback for device removal
4373 * @pdev: PCI device that was removed
4374 *
4375  *	PCI layer indicates to libata via this hook that a
4376  *	hot-unplug or module unload event has occurred.
4377 * Handle this by unregistering all objects associated
4378 * with this PCI device. Free those objects. Then finally
4379 * release PCI resources and disable device.
4380 *
4381 * LOCKING:
4382 * Inherited from PCI layer (may sleep).
4383 */
4384
4385 void ata_pci_remove_one (struct pci_dev *pdev)
4386 {
4387 struct device *dev = pci_dev_to_dev(pdev);
4388 struct ata_host_set *host_set = dev_get_drvdata(dev);
4389 struct ata_port *ap;
4390 unsigned int i;
4391
4392 for (i = 0; i < host_set->n_ports; i++) {
4393 ap = host_set->ports[i];
4394
4395 scsi_remove_host(ap->host);
4396 }
4397
4398 free_irq(host_set->irq, host_set);
4399
4400 for (i = 0; i < host_set->n_ports; i++) {
4401 ap = host_set->ports[i];
4402
4403 ata_scsi_release(ap->host);
4404
4405 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
4406 struct ata_ioports *ioaddr = &ap->ioaddr;
4407
4408 if (ioaddr->cmd_addr == 0x1f0)
4409 release_region(0x1f0, 8);
4410 else if (ioaddr->cmd_addr == 0x170)
4411 release_region(0x170, 8);
4412 }
4413
4414 scsi_host_put(ap->host);
4415 }
4416
4417 if (host_set->ops->host_stop)
4418 host_set->ops->host_stop(host_set);
4419
4420 kfree(host_set);
4421
4422 pci_release_regions(pdev);
4423 pci_disable_device(pdev);
4424 dev_set_drvdata(dev, NULL);
4425 }
4426
4427 /* move to PCI subsystem */
4428 int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits)
4429 {
4430 unsigned long tmp = 0;
4431
4432 switch (bits->width) {
4433 case 1: {
4434 u8 tmp8 = 0;
4435 pci_read_config_byte(pdev, bits->reg, &tmp8);
4436 tmp = tmp8;
4437 break;
4438 }
4439 case 2: {
4440 u16 tmp16 = 0;
4441 pci_read_config_word(pdev, bits->reg, &tmp16);
4442 tmp = tmp16;
4443 break;
4444 }
4445 case 4: {
4446 u32 tmp32 = 0;
4447 pci_read_config_dword(pdev, bits->reg, &tmp32);
4448 tmp = tmp32;
4449 break;
4450 }
4451
4452 default:
4453 return -EINVAL;
4454 }
4455
4456 tmp &= bits->mask;
4457
4458 return (tmp == bits->val) ? 1 : 0;
4459 }
4460 #endif /* CONFIG_PCI */
4461
4462
4463 static int __init ata_init(void)
4464 {
4465 ata_wq = create_workqueue("ata");
4466 if (!ata_wq)
4467 return -ENOMEM;
4468
4469 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
4470 return 0;
4471 }
4472
4473 static void __exit ata_exit(void)
4474 {
4475 destroy_workqueue(ata_wq);
4476 }
4477
4478 module_init(ata_init);
4479 module_exit(ata_exit);
4480
4481 /*
4482 * libata is essentially a library of internal helper functions for
4483 * low-level ATA host controller drivers. As such, the API/ABI is
4484 * likely to change as new drivers are added and updated.
4485 * Do not depend on ABI/API stability.
4486 */
4487
4488 EXPORT_SYMBOL_GPL(ata_std_bios_param);
4489 EXPORT_SYMBOL_GPL(ata_std_ports);
4490 EXPORT_SYMBOL_GPL(ata_device_add);
4491 EXPORT_SYMBOL_GPL(ata_sg_init);
4492 EXPORT_SYMBOL_GPL(ata_sg_init_one);
4493 EXPORT_SYMBOL_GPL(ata_qc_complete);
4494 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
4495 EXPORT_SYMBOL_GPL(ata_eng_timeout);
4496 EXPORT_SYMBOL_GPL(ata_tf_load);
4497 EXPORT_SYMBOL_GPL(ata_tf_read);
4498 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
4499 EXPORT_SYMBOL_GPL(ata_std_dev_select);
4500 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
4501 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
4502 EXPORT_SYMBOL_GPL(ata_check_status);
4503 EXPORT_SYMBOL_GPL(ata_altstatus);
4504 EXPORT_SYMBOL_GPL(ata_chk_err);
4505 EXPORT_SYMBOL_GPL(ata_exec_command);
4506 EXPORT_SYMBOL_GPL(ata_port_start);
4507 EXPORT_SYMBOL_GPL(ata_port_stop);
4508 EXPORT_SYMBOL_GPL(ata_host_stop);
4509 EXPORT_SYMBOL_GPL(ata_interrupt);
4510 EXPORT_SYMBOL_GPL(ata_qc_prep);
4511 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
4512 EXPORT_SYMBOL_GPL(ata_bmdma_start);
4513 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
4514 EXPORT_SYMBOL_GPL(ata_bmdma_status);
4515 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
4516 EXPORT_SYMBOL_GPL(ata_port_probe);
4517 EXPORT_SYMBOL_GPL(sata_phy_reset);
4518 EXPORT_SYMBOL_GPL(__sata_phy_reset);
4519 EXPORT_SYMBOL_GPL(ata_bus_reset);
4520 EXPORT_SYMBOL_GPL(ata_port_disable);
4521 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
4522 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4523 EXPORT_SYMBOL_GPL(ata_scsi_error);
4524 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
4525 EXPORT_SYMBOL_GPL(ata_scsi_release);
4526 EXPORT_SYMBOL_GPL(ata_host_intr);
4527 EXPORT_SYMBOL_GPL(ata_dev_classify);
4528 EXPORT_SYMBOL_GPL(ata_dev_id_string);
4529 EXPORT_SYMBOL_GPL(ata_dev_config);
4530 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
4531
4532 #ifdef CONFIG_PCI
4533 EXPORT_SYMBOL_GPL(pci_test_config_bits);
4534 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
4535 EXPORT_SYMBOL_GPL(ata_pci_init_one);
4536 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
4537 #endif /* CONFIG_PCI */