ide: add ide_queue_pc_tail() helper
1 #ifndef _IDE_H
2 #define _IDE_H
3 /*
4 * linux/include/linux/ide.h
5 *
6 * Copyright (C) 1994-2002 Linus Torvalds & authors
7 */
8
9 #include <linux/init.h>
10 #include <linux/ioport.h>
11 #include <linux/ata.h>
12 #include <linux/blkdev.h>
13 #include <linux/proc_fs.h>
14 #include <linux/interrupt.h>
15 #include <linux/bitops.h>
16 #include <linux/bio.h>
17 #include <linux/device.h>
18 #include <linux/pci.h>
19 #include <linux/completion.h>
20 #include <linux/pm.h>
21 #ifdef CONFIG_BLK_DEV_IDEACPI
22 #include <acpi/acpi.h>
23 #endif
24 #include <asm/byteorder.h>
25 #include <asm/system.h>
26 #include <asm/io.h>
27 #include <asm/mutex.h>
28
29 #if defined(CONFIG_CRIS) || defined(CONFIG_FRV)
30 # define SUPPORT_VLB_SYNC 0
31 #else
32 # define SUPPORT_VLB_SYNC 1
33 #endif
34
35 /*
36 * Used to indicate "no IRQ", should be a value that cannot be an IRQ
37 * number.
38 */
39
40 #define IDE_NO_IRQ (-1)
41
42 typedef unsigned char byte; /* used everywhere */
43
44 /*
45 * Probably not wise to fiddle with these
46 */
47 #define ERROR_MAX 8 /* Max read/write errors per sector */
48 #define ERROR_RESET 3 /* Reset controller every 4th retry */
49 #define ERROR_RECAL 1 /* Recalibrate every 2nd retry */
50
51 /*
52 * state flags
53 */
54
55 #define DMA_PIO_RETRY 1 /* retrying in PIO */
56
57 #define HWIF(drive) ((ide_hwif_t *)((drive)->hwif))
58 #define HWGROUP(drive) ((ide_hwgroup_t *)(HWIF(drive)->hwgroup))
59
60 /*
61 * Definitions for accessing IDE controller registers
62 */
63 #define IDE_NR_PORTS (10)
64
65 struct ide_io_ports {
66 unsigned long data_addr;
67
68 union {
69 unsigned long error_addr; /* read: error */
70 unsigned long feature_addr; /* write: feature */
71 };
72
73 unsigned long nsect_addr;
74 unsigned long lbal_addr;
75 unsigned long lbam_addr;
76 unsigned long lbah_addr;
77
78 unsigned long device_addr;
79
80 union {
81 unsigned long status_addr; /*  read: status  */
82 unsigned long command_addr; /* write: command */
83 };
84
85 unsigned long ctl_addr;
86
87 unsigned long irq_addr;
88 };
89
90 #define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good))
91
92 #define BAD_R_STAT (ATA_BUSY | ATA_ERR)
93 #define BAD_W_STAT (BAD_R_STAT | ATA_DF)
94 #define BAD_STAT (BAD_R_STAT | ATA_DRQ)
95 #define DRIVE_READY (ATA_DRDY | ATA_DSC)
96
97 #define BAD_CRC (ATA_ABORTED | ATA_ICRC)
98
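/*
 * Example (illustrative sketch only): drivers typically combine OK_STAT()
 * with the masks above to test a status byte, e.g. "drive ready and none
 * of the BUSY/ERR/DRQ bits raised":
 */
#if 0
static inline int example_status_ok(u8 stat)
{
        return OK_STAT(stat, ATA_DRDY, BAD_STAT);
}
#endif
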
99 #define SATA_NR_PORTS (3) /* 16 possible ?? */
100
101 #define SATA_STATUS_OFFSET (0)
102 #define SATA_ERROR_OFFSET (1)
103 #define SATA_CONTROL_OFFSET (2)
104
105 /*
106 * Our Physical Region Descriptor (PRD) table should be large enough
107 * to handle the biggest I/O request we are likely to see. Since requests
108 * can have no more than 256 sectors, and since the typical blocksize is
109 * two or more sectors, we could get by with a limit of 128 entries here for
110 * the usual worst case. Most requests seem to include some contiguous blocks,
111 * further reducing the number of table entries required.
112 *
113 * The driver reverts to PIO mode for individual requests that exceed
114 * this limit (possible with 512 byte blocksizes, eg. MSDOS f/s), so handling
115 * 100% of all crazy scenarios here is not necessary.
116 *
117 * As it turns out though, we must allocate a full 4KB page for this,
118 * so the two PRD tables (ide0 & ide1) will each get half of that,
119 * allowing each to have about 256 entries (8 bytes each) from this.
120 */
121 #define PRD_BYTES 8
122 #define PRD_ENTRIES 256
123
124 /*
125 * Some more useful definitions
126 */
127 #define PARTN_BITS 6 /* number of minor dev bits for partitions */
128 #define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */
129 #define SECTOR_SIZE 512
130
131 #define IDE_LARGE_SEEK(b1,b2,t) (((b1) > (b2) + (t)) || ((b2) > (b1) + (t)))
132
133 /*
134 * Timeouts for various operations:
135 */
136 #define WAIT_DRQ (HZ/10) /* 100msec - spec allows up to 20ms */
137 #define WAIT_READY (5*HZ) /* 5sec - some laptops are very slow */
138 #define WAIT_PIDENTIFY (10*HZ) /* 10sec - should be less than 3ms (?), if all ATAPI CDs are closed at boot */
139 #define WAIT_WORSTCASE (30*HZ) /* 30sec - worst case when spinning up */
140 #define WAIT_CMD (10*HZ) /* 10sec - maximum wait for an IRQ to happen */
141 #define WAIT_MIN_SLEEP (2*HZ/100) /* 20msec - minimum sleep time */
142
143 /*
144 * Op codes for special requests to be handled by ide_special_rq().
145 * Values should be in the range of 0x20 to 0x3f.
146 */
147 #define REQ_DRIVE_RESET 0x20
148
149 /*
150 * Check for an interrupt and acknowledge the interrupt status
151 */
152 struct hwif_s;
153 typedef int (ide_ack_intr_t)(struct hwif_s *);
154
155 /*
156 * hwif_chipset_t is used to keep track of the specific hardware
157 * chipset used by each IDE interface, if known.
158 */
159 enum { ide_unknown, ide_generic, ide_pci,
160 ide_cmd640, ide_dtc2278, ide_ali14xx,
161 ide_qd65xx, ide_umc8672, ide_ht6560b,
162 ide_rz1000, ide_trm290,
163 ide_cmd646, ide_cy82c693, ide_4drives,
164 ide_pmac, ide_acorn,
165 ide_au1xxx, ide_palm3710
166 };
167
168 typedef u8 hwif_chipset_t;
169
170 /*
171 * Structure to hold all information about the location of this port
172 */
173 typedef struct hw_regs_s {
174 union {
175 struct ide_io_ports io_ports;
176 unsigned long io_ports_array[IDE_NR_PORTS];
177 };
178
179 int irq; /* our irq number */
180 ide_ack_intr_t *ack_intr; /* acknowledge interrupt */
181 hwif_chipset_t chipset;
182 struct device *dev, *parent;
183 unsigned long config;
184 } hw_regs_t;
185
186 void ide_init_port_data(struct hwif_s *, unsigned int);
187 void ide_init_port_hw(struct hwif_s *, hw_regs_t *);
188
189 static inline void ide_std_init_ports(hw_regs_t *hw,
190 unsigned long io_addr,
191 unsigned long ctl_addr)
192 {
193 unsigned int i;
194
195 for (i = 0; i <= 7; i++)
196 hw->io_ports_array[i] = io_addr++;
197
198 hw->io_ports.ctl_addr = ctl_addr;
199 }
200
201 /* for IDE PCI controllers in legacy mode, temporary */
202 static inline int __ide_default_irq(unsigned long base)
203 {
204 switch (base) {
205 #ifdef CONFIG_IA64
206 case 0x1f0: return isa_irq_to_vector(14);
207 case 0x170: return isa_irq_to_vector(15);
208 #else
209 case 0x1f0: return 14;
210 case 0x170: return 15;
211 #endif
212 }
213 return 0;
214 }
215
216 #if defined(CONFIG_ARM) || defined(CONFIG_FRV) || defined(CONFIG_M68K) || \
217 defined(CONFIG_MIPS) || defined(CONFIG_MN10300) || defined(CONFIG_PARISC) \
218 || defined(CONFIG_PPC) || defined(CONFIG_SPARC) || defined(CONFIG_SPARC64)
219 #include <asm/ide.h>
220 #else
221 #include <asm-generic/ide_iops.h>
222 #endif
223
224 #define MAX_HWIFS 10
225
226 /* Currently only m68k, apus and m8xx need it */
227 #ifndef IDE_ARCH_ACK_INTR
228 # define ide_ack_intr(hwif) (1)
229 #endif
230
231 /* Currently only Atari needs it */
232 #ifndef IDE_ARCH_LOCK
233 # define ide_release_lock() do {} while (0)
234 # define ide_get_lock(hdlr, data) do {} while (0)
235 #endif /* IDE_ARCH_LOCK */
236
237 /*
238 * Now for the data we need to maintain per-drive: ide_drive_t
239 */
240
241 #define ide_scsi 0x21
242 #define ide_disk 0x20
243 #define ide_optical 0x7
244 #define ide_cdrom 0x5
245 #define ide_tape 0x1
246 #define ide_floppy 0x0
247
248 /*
249 * Special Driver Flags
250 *
251 * set_geometry : respecify drive geometry
252 * recalibrate : seek to cyl 0
253 * set_multmode : set multmode count
254 * set_tune : tune interface for drive
255 * serviced : service command
256 * reserved : unused
257 */
258 typedef union {
259 unsigned all : 8;
260 struct {
261 unsigned set_geometry : 1;
262 unsigned recalibrate : 1;
263 unsigned set_multmode : 1;
264 unsigned set_tune : 1;
265 unsigned serviced : 1;
266 unsigned reserved : 3;
267 } b;
268 } special_t;
269
270 /*
271 * ATA-IDE Select Register, aka Device-Head
272 *
273 * head : always zeros here
274 * unit : drive select number: 0/1
275 * bit5 : always 1
276 * lba : using LBA instead of CHS
277 * bit7 : always 1
278 */
279 typedef union {
280 unsigned all : 8;
281 struct {
282 #if defined(__LITTLE_ENDIAN_BITFIELD)
283 unsigned head : 4;
284 unsigned unit : 1;
285 unsigned bit5 : 1;
286 unsigned lba : 1;
287 unsigned bit7 : 1;
288 #elif defined(__BIG_ENDIAN_BITFIELD)
289 unsigned bit7 : 1;
290 unsigned lba : 1;
291 unsigned bit5 : 1;
292 unsigned unit : 1;
293 unsigned head : 4;
294 #else
295 #error "Please fix <asm/byteorder.h>"
296 #endif
297 } b;
298 } select_t, ata_select_t;
299
300 /*
301 * Status returned from various ide_ functions
302 */
303 typedef enum {
304 ide_stopped, /* no drive operation was started */
305 ide_started, /* a drive operation was started, handler was set */
306 } ide_startstop_t;
307
308 struct ide_devset;
309 struct ide_driver_s;
310
311 #ifdef CONFIG_BLK_DEV_IDEACPI
312 struct ide_acpi_drive_link;
313 struct ide_acpi_hwif_link;
314 #endif
315
316 /* ATAPI device flags */
317 enum {
318 IDE_AFLAG_DRQ_INTERRUPT = (1 << 0),
319 IDE_AFLAG_MEDIA_CHANGED = (1 << 1),
320
321 /* ide-cd */
322 /* Drive cannot lock the door. */
323 IDE_AFLAG_NO_DOORLOCK = (1 << 2),
324 /* Drive cannot eject the disc. */
325 IDE_AFLAG_NO_EJECT = (1 << 3),
326 /* Drive is a pre ATAPI 1.2 drive. */
327 IDE_AFLAG_PRE_ATAPI12 = (1 << 4),
328 /* TOC addresses are in BCD. */
329 IDE_AFLAG_TOCADDR_AS_BCD = (1 << 5),
330 /* TOC track numbers are in BCD. */
331 IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 6),
332 /*
333 * Drive does not provide data in multiples of SECTOR_SIZE
334 * when more than one interrupt is needed.
335 */
336 IDE_AFLAG_LIMIT_NFRAMES = (1 << 7),
337 /* Seeking in progress. */
338 IDE_AFLAG_SEEKING = (1 << 8),
339 /* Saved TOC information is current. */
340 IDE_AFLAG_TOC_VALID = (1 << 9),
341 /* We think that the drive door is locked. */
342 IDE_AFLAG_DOOR_LOCKED = (1 << 10),
343 /* SET_CD_SPEED command is unsupported. */
344 IDE_AFLAG_NO_SPEED_SELECT = (1 << 11),
345 IDE_AFLAG_VERTOS_300_SSD = (1 << 12),
346 IDE_AFLAG_VERTOS_600_ESD = (1 << 13),
347 IDE_AFLAG_SANYO_3CD = (1 << 14),
348 IDE_AFLAG_FULL_CAPS_PAGE = (1 << 15),
349 IDE_AFLAG_PLAY_AUDIO_OK = (1 << 16),
350 IDE_AFLAG_LE_SPEED_FIELDS = (1 << 17),
351
352 /* ide-floppy */
353 /* Format in progress */
354 IDE_AFLAG_FORMAT_IN_PROGRESS = (1 << 18),
355 /* Avoid commands not supported in Clik drive */
356 IDE_AFLAG_CLIK_DRIVE = (1 << 19),
357 /* Requires BH algorithm for packets */
358 IDE_AFLAG_ZIP_DRIVE = (1 << 20),
359
360 /* ide-tape */
361 IDE_AFLAG_IGNORE_DSC = (1 << 21),
362 /* 0 when the tape position is unknown */
363 IDE_AFLAG_ADDRESS_VALID = (1 << 22),
364 /* Device already opened */
365 IDE_AFLAG_BUSY = (1 << 23),
366 /* Attempt to auto-detect the current user block size */
367 IDE_AFLAG_DETECT_BS = (1 << 24),
368 /* Currently on a filemark */
369 IDE_AFLAG_FILEMARK = (1 << 25),
370 /* 0 = no tape is loaded, so we don't rewind after ejecting */
371 IDE_AFLAG_MEDIUM_PRESENT = (1 << 26),
372
373 IDE_AFLAG_NO_AUTOCLOSE = (1 << 27),
374 };
375
376 struct ide_drive_s {
377 char name[4]; /* drive name, such as "hda" */
378 char driver_req[10]; /* requests specific driver */
379
380 struct request_queue *queue; /* request queue */
381
382 struct request *rq; /* current request */
383 struct ide_drive_s *next; /* circular list of hwgroup drives */
384 void *driver_data; /* extra driver data */
385 u16 *id; /* identification info */
386 #ifdef CONFIG_IDE_PROC_FS
387 struct proc_dir_entry *proc; /* /proc/ide/ directory entry */
388 const struct ide_devset **settings; /* /proc/ide/ drive settings */
389 #endif
390 struct hwif_s *hwif; /* actually (ide_hwif_t *) */
391
392 unsigned long sleep; /* sleep until this time */
393 unsigned long service_start; /* time we started last request */
394 unsigned long service_time; /* service time of last request */
395 unsigned long timeout; /* max time to wait for irq */
396
397 special_t special; /* special action flags */
398 select_t select; /* basic drive/head select reg value */
399
400 u8 retry_pio; /* retrying dma capable host in pio */
401 u8 state; /* retry state */
402 u8 waiting_for_dma; /* dma currently in progress */
403
404 unsigned keep_settings : 1; /* restore settings after drive reset */
405 unsigned using_dma : 1; /* disk is using dma for read/write */
406 unsigned unmask : 1; /* okay to unmask other irqs */
407 unsigned noflush : 1; /* don't attempt flushes */
408 unsigned dsc_overlap : 1; /* DSC overlap */
409 unsigned nice1 : 1; /* give potential excess bandwidth */
410 unsigned present : 1; /* drive is physically present */
411 unsigned dead : 1; /* device ejected hint */
412 unsigned id_read : 1; /* 1=id read from disk 0 = synthetic */
413 unsigned noprobe : 1; /* from: hdx=noprobe */
414 unsigned removable : 1; /* 1 if need to do check_media_change */
415 unsigned attach : 1; /* needed for removable devices */
416 unsigned forced_geom : 1; /* 1 if hdx=c,h,s was given at boot */
417 unsigned no_unmask : 1; /* disallow setting unmask bit */
418 unsigned no_io_32bit : 1; /* disallow enabling 32bit I/O */
419 unsigned doorlocking : 1; /* for removable only: door lock/unlock works */
420 unsigned nodma : 1; /* disallow DMA */
421 unsigned blocked : 1; /* 1=power management told us not to do anything, so sleep nicely */
422 unsigned scsi : 1; /* 0=default, 1=ide-scsi emulation */
423 unsigned sleeping : 1; /* 1=sleeping & sleep field valid */
424 unsigned post_reset : 1;
425 unsigned udma33_warned : 1;
426 unsigned addressing : 2; /* 0=28-bit, 1=48-bit, 2=48-bit doing 28-bit */
427 unsigned wcache : 1; /* status of write cache */
428 unsigned nowerr : 1; /* used for ignoring ATA_DF */
429
430 u8 quirk_list; /* considered quirky, set for a specific host */
431 u8 init_speed; /* transfer rate set at boot */
432 u8 current_speed; /* current transfer rate set */
433 u8 desired_speed; /* desired transfer rate set */
434 u8 dn; /* now widespread use */
435 u8 acoustic; /* acoustic management */
436 u8 media; /* disk, cdrom, tape, floppy, ... */
437 u8 ready_stat; /* min status value for drive ready */
438 u8 mult_count; /* current multiple sector setting */
439 u8 mult_req; /* requested multiple sector setting */
440 u8 tune_req; /* requested drive tuning setting */
441 u8 io_32bit; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */
442 u8 bad_wstat; /* used for ignoring ATA_DF */
443 u8 head; /* "real" number of heads */
444 u8 sect; /* "real" sectors per track */
445 u8 bios_head; /* BIOS/fdisk/LILO number of heads */
446 u8 bios_sect; /* BIOS/fdisk/LILO sectors per track */
447
448 unsigned int bios_cyl; /* BIOS/fdisk/LILO number of cyls */
449 unsigned int cyl; /* "real" number of cyls */
450 unsigned int drive_data; /* used by set_pio_mode/selectproc */
451 unsigned int failures; /* current failure count */
452 unsigned int max_failures; /* maximum allowed failure count */
453 u64 probed_capacity;/* initial reported media capacity (ide-cd only currently) */
454
455 u64 capacity64; /* total number of sectors */
456
457 int lun; /* logical unit */
458 int crc_count; /* crc counter to reduce drive speed */
459 #ifdef CONFIG_BLK_DEV_IDEACPI
460 struct ide_acpi_drive_link *acpidata;
461 #endif
462 struct list_head list;
463 struct device gendev;
464 struct completion gendev_rel_comp; /* to deal with device release() */
465
466 /* callback for packet commands */
467 void (*pc_callback)(struct ide_drive_s *);
468
469 unsigned long atapi_flags;
470 };
471
472 typedef struct ide_drive_s ide_drive_t;
473
474 #define to_ide_device(dev) container_of(dev, ide_drive_t, gendev)
475
476 struct ide_task_s;
477 struct ide_port_info;
478
479 struct ide_tp_ops {
480 void (*exec_command)(struct hwif_s *, u8);
481 u8 (*read_status)(struct hwif_s *);
482 u8 (*read_altstatus)(struct hwif_s *);
483 u8 (*read_sff_dma_status)(struct hwif_s *);
484
485 void (*set_irq)(struct hwif_s *, int);
486
487 void (*tf_load)(ide_drive_t *, struct ide_task_s *);
488 void (*tf_read)(ide_drive_t *, struct ide_task_s *);
489
490 void (*input_data)(ide_drive_t *, struct request *, void *,
491 unsigned int);
492 void (*output_data)(ide_drive_t *, struct request *, void *,
493 unsigned int);
494 };
495
496 extern const struct ide_tp_ops default_tp_ops;
497
498 /**
499 * struct ide_port_ops - IDE port operations
500 *
501 * @init_dev: host specific initialization of a device
502 * @set_pio_mode: routine to program host for PIO mode
503 * @set_dma_mode: routine to program host for DMA mode
504 * @selectproc: tweaks hardware to select drive
505 * @reset_poll: chipset polling based on hba specifics
506 * @pre_reset: chipset specific changes to default for device-hba resets
507 * @resetproc: routine to reset controller after a disk reset
508 * @maskproc: special host masking for drive selection
509 * @quirkproc: check host's drive quirk list
510 *
511 * @mdma_filter: filter MDMA modes
512 * @udma_filter: filter UDMA modes
513 *
514 * @cable_detect: detect cable type
515 */
516 struct ide_port_ops {
517 void (*init_dev)(ide_drive_t *);
518 void (*set_pio_mode)(ide_drive_t *, const u8);
519 void (*set_dma_mode)(ide_drive_t *, const u8);
520 void (*selectproc)(ide_drive_t *);
521 int (*reset_poll)(ide_drive_t *);
522 void (*pre_reset)(ide_drive_t *);
523 void (*resetproc)(ide_drive_t *);
524 void (*maskproc)(ide_drive_t *, int);
525 void (*quirkproc)(ide_drive_t *);
526
527 u8 (*mdma_filter)(ide_drive_t *);
528 u8 (*udma_filter)(ide_drive_t *);
529
530 u8 (*cable_detect)(struct hwif_s *);
531 };
532
533 struct ide_dma_ops {
534 void (*dma_host_set)(struct ide_drive_s *, int);
535 int (*dma_setup)(struct ide_drive_s *);
536 void (*dma_exec_cmd)(struct ide_drive_s *, u8);
537 void (*dma_start)(struct ide_drive_s *);
538 int (*dma_end)(struct ide_drive_s *);
539 int (*dma_test_irq)(struct ide_drive_s *);
540 void (*dma_lost_irq)(struct ide_drive_s *);
541 void (*dma_timeout)(struct ide_drive_s *);
542 };
543
544 struct ide_host;
545
546 typedef struct hwif_s {
547 struct hwif_s *next; /* for linked-list in ide_hwgroup_t */
548 struct hwif_s *mate; /* other hwif from same PCI chip */
549 struct hwgroup_s *hwgroup; /* actually (ide_hwgroup_t *) */
550 struct proc_dir_entry *proc; /* /proc/ide/ directory entry */
551
552 struct ide_host *host;
553
554 char name[6]; /* name of interface, eg. "ide0" */
555
556 struct ide_io_ports io_ports;
557
558 unsigned long sata_scr[SATA_NR_PORTS];
559
560 ide_drive_t drives[MAX_DRIVES]; /* drive info */
561
562 u8 major; /* our major number */
563 u8 index; /* 0 for ide0; 1 for ide1; ... */
564 u8 channel; /* for dual-port chips: 0=primary, 1=secondary */
565
566 u32 host_flags;
567
568 u8 pio_mask;
569
570 u8 ultra_mask;
571 u8 mwdma_mask;
572 u8 swdma_mask;
573
574 u8 cbl; /* cable type */
575
576 hwif_chipset_t chipset; /* sub-module for tuning.. */
577
578 struct device *dev;
579
580 ide_ack_intr_t *ack_intr;
581
582 void (*rw_disk)(ide_drive_t *, struct request *);
583
584 const struct ide_tp_ops *tp_ops;
585 const struct ide_port_ops *port_ops;
586 const struct ide_dma_ops *dma_ops;
587
588 void (*ide_dma_clear_irq)(ide_drive_t *drive);
589
590 /* dma physical region descriptor table (cpu view) */
591 unsigned int *dmatable_cpu;
592 /* dma physical region descriptor table (dma view) */
593 dma_addr_t dmatable_dma;
594 /* Scatter-gather list used to build the above */
595 struct scatterlist *sg_table;
596 int sg_max_nents; /* Maximum number of entries in it */
597 int sg_nents; /* Current number of entries in it */
598 int sg_dma_direction; /* dma transfer direction */
599
600 /* data phase of the active command (currently only valid for PIO/DMA) */
601 int data_phase;
602
603 unsigned int nsect;
604 unsigned int nleft;
605 struct scatterlist *cursg;
606 unsigned int cursg_ofs;
607
608 int rqsize; /* max sectors per request */
609 int irq; /* our irq number */
610
611 unsigned long dma_base; /* base addr for dma ports */
612
613 unsigned long config_data; /* for use by chipset-specific code */
614 unsigned long select_data; /* for use by chipset-specific code */
615
616 unsigned long extra_base; /* extra addr for dma ports */
617 unsigned extra_ports; /* number of extra dma ports */
618
619 unsigned present : 1; /* this interface exists */
620 unsigned serialized : 1; /* serialized all channel operation */
621 unsigned sharing_irq: 1; /* 1 = sharing irq with another hwif */
622 unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */
623
624 struct device gendev;
625 struct device *portdev;
626
627 struct completion gendev_rel_comp; /* To deal with device release() */
628
629 void *hwif_data; /* extra hwif data */
630
631 unsigned dma;
632
633 #ifdef CONFIG_BLK_DEV_IDEACPI
634 struct ide_acpi_hwif_link *acpidata;
635 #endif
636 } ____cacheline_internodealigned_in_smp ide_hwif_t;
637
638 struct ide_host {
639 ide_hwif_t *ports[MAX_HWIFS];
640 unsigned int n_ports;
641 struct device *dev[2];
642 unsigned int (*init_chipset)(struct pci_dev *);
643 unsigned long host_flags;
644 void *host_priv;
645 };
646
647 /*
648 * internal ide interrupt handler type
649 */
650 typedef ide_startstop_t (ide_handler_t)(ide_drive_t *);
651 typedef int (ide_expiry_t)(ide_drive_t *);
652
653 /* used by ide-cd, ide-floppy, etc. */
654 typedef void (xfer_func_t)(ide_drive_t *, struct request *rq, void *, unsigned);
655
656 typedef struct hwgroup_s {
657 /* irq handler, if active */
658 ide_startstop_t (*handler)(ide_drive_t *);
659
660 /* BOOL: protects all fields below */
661 volatile int busy;
662 /* BOOL: wake us up on timer expiry */
663 unsigned int sleeping : 1;
664 /* BOOL: polling active & poll_timeout field valid */
665 unsigned int polling : 1;
666
667 /* current drive */
668 ide_drive_t *drive;
669 /* ptr to current hwif in linked-list */
670 ide_hwif_t *hwif;
671
672 /* current request */
673 struct request *rq;
674
675 /* failsafe timer */
676 struct timer_list timer;
677 /* timeout value during long polls */
678 unsigned long poll_timeout;
679 /* queried upon timeouts */
680 int (*expiry)(ide_drive_t *);
681
682 int req_gen;
683 int req_gen_timer;
684 } ide_hwgroup_t;
685
686 typedef struct ide_driver_s ide_driver_t;
687
688 extern struct mutex ide_setting_mtx;
689
690 int get_io_32bit(ide_drive_t *);
691 int set_io_32bit(ide_drive_t *, int);
692 int get_ksettings(ide_drive_t *);
693 int set_ksettings(ide_drive_t *, int);
694 int set_pio_mode(ide_drive_t *, int);
695 int get_unmaskirq(ide_drive_t *);
696 int set_unmaskirq(ide_drive_t *, int);
697 int get_using_dma(ide_drive_t *);
698 int set_using_dma(ide_drive_t *, int);
699
700 #define ide_devset_get(name, field) \
701 int get_##name(ide_drive_t *drive) \
702 { \
703 return drive->field; \
704 }
705
706 #define ide_devset_set(name, field) \
707 int set_##name(ide_drive_t *drive, int arg) \
708 { \
709 drive->field = arg; \
710 return 0; \
711 }
712
713 /* ATAPI packet command flags */
714 enum {
715 /* set when an error is considered normal - no retry (ide-tape) */
716 PC_FLAG_ABORT = (1 << 0),
717 PC_FLAG_SUPPRESS_ERROR = (1 << 1),
718 PC_FLAG_WAIT_FOR_DSC = (1 << 2),
719 PC_FLAG_DMA_OK = (1 << 3),
720 PC_FLAG_DMA_IN_PROGRESS = (1 << 4),
721 PC_FLAG_DMA_ERROR = (1 << 5),
722 PC_FLAG_WRITING = (1 << 6),
723 /* command timed out */
724 PC_FLAG_TIMEDOUT = (1 << 7),
725 };
726
727 /*
728 * With each packet command, we allocate a buffer of IDE_PC_BUFFER_SIZE bytes.
729 * This is used for several packet commands (not for READ/WRITE commands).
730 */
731 #define IDE_PC_BUFFER_SIZE 256
732
733 struct ide_atapi_pc {
734 /* actual packet bytes */
735 u8 c[12];
736 /* incremented on each retry */
737 int retries;
738 int error;
739
740 /* bytes to transfer */
741 int req_xfer;
742 /* bytes actually transferred */
743 int xferred;
744
745 /* data buffer */
746 u8 *buf;
747 /* current buffer position */
748 u8 *cur_pos;
749 int buf_size;
750 /* missing/available data on the current buffer */
751 int b_count;
752
753 /* the corresponding request */
754 struct request *rq;
755
756 unsigned long flags;
757
758 /*
759 * those are more or less driver-specific and some of them are subject
760 * to change/removal later.
761 */
762 u8 pc_buf[IDE_PC_BUFFER_SIZE];
763
764 /* idetape only */
765 struct idetape_bh *bh;
766 char *b_data;
767
768 /* idescsi only for now */
769 struct scatterlist *sg;
770 unsigned int sg_cnt;
771
772 struct scsi_cmnd *scsi_cmd;
773 void (*done) (struct scsi_cmnd *);
774
775 unsigned long timeout;
776 };
777
778 #ifdef CONFIG_IDE_PROC_FS
779 /*
780 * configurable drive settings
781 */
782
783 #define S_READ (1 << 0)
784 #define S_WRITE (1 << 1)
785 #define S_RW (S_READ | S_WRITE)
786 #define S_NOLOCK (1 << 2)
787
788 struct ide_devset {
789 const char *name;
790 unsigned int flags;
791 int min, max;
792 int (*get)(ide_drive_t *);
793 int (*set)(ide_drive_t *, int);
794 int (*mulf)(ide_drive_t *);
795 int (*divf)(ide_drive_t *);
796 };
797
798 #define __DEVSET(_name, _flags, _min, _max, _get, _set, _mulf, _divf) { \
799 .name = __stringify(_name), \
800 .flags = _flags, \
801 .min = _min, \
802 .max = _max, \
803 .get = _get, \
804 .set = _set, \
805 .mulf = _mulf, \
806 .divf = _divf, \
807 }
808
809 #define __IDE_DEVSET(_name, _flags, _min, _max, _get, _set, _mulf, _divf) \
810 static const struct ide_devset ide_devset_##_name = \
811 __DEVSET(_name, _flags, _min, _max, _get, _set, _mulf, _divf)
812
813 #define IDE_DEVSET(_name, _flags, _min, _max, _get, _set) \
814 __IDE_DEVSET(_name, _flags, _min, _max, _get, _set, NULL, NULL)
815
816 #define ide_devset_rw_nolock(_name, _min, _max, _func) \
817 IDE_DEVSET(_name, S_RW | S_NOLOCK, _min, _max, get_##_func, set_##_func)
818
819 #define ide_devset_w_nolock(_name, _min, _max, _func) \
820 IDE_DEVSET(_name, S_WRITE | S_NOLOCK, _min, _max, NULL, set_##_func)
821
822 #define ide_devset_rw(_name, _min, _max, _field) \
823 static ide_devset_get(_name, _field); \
824 static ide_devset_set(_name, _field); \
825 IDE_DEVSET(_name, S_RW, _min, _max, get_##_name, set_##_name)
826
827 #define ide_devset_r(_name, _min, _max, _field) \
828 ide_devset_get(_name, _field) \
829 IDE_DEVSET(_name, S_READ, _min, _max, get_##_name, NULL)
830
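/*
 * Example (illustrative sketch only): declaring a read-write setting for an
 * existing ide_drive_t bitfield such as "wcache" boils down to:
 */
#if 0
ide_devset_rw(wcache, 0, 1, wcache);
/*
 * ... which generates get_wcache()/set_wcache() accessors for
 * drive->wcache plus a "static const struct ide_devset ide_devset_wcache"
 * entry with S_RW flags and a 0..1 range.
 */
#endif
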
831 /*
832 * /proc/ide interface
833 */
834 typedef struct {
835 const char *name;
836 mode_t mode;
837 read_proc_t *read_proc;
838 write_proc_t *write_proc;
839 } ide_proc_entry_t;
840
841 void proc_ide_create(void);
842 void proc_ide_destroy(void);
843 void ide_proc_register_port(ide_hwif_t *);
844 void ide_proc_port_register_devices(ide_hwif_t *);
845 void ide_proc_unregister_device(ide_drive_t *);
846 void ide_proc_unregister_port(ide_hwif_t *);
847 void ide_proc_register_driver(ide_drive_t *, ide_driver_t *);
848 void ide_proc_unregister_driver(ide_drive_t *, ide_driver_t *);
849
850 read_proc_t proc_ide_read_capacity;
851 read_proc_t proc_ide_read_geometry;
852
853 /*
854 * Standard exit stuff:
855 */
856 #define PROC_IDE_READ_RETURN(page,start,off,count,eof,len) \
857 { \
858 len -= off; \
859 if (len < count) { \
860 *eof = 1; \
861 if (len <= 0) \
862 return 0; \
863 } else \
864 len = count; \
865 *start = page + off; \
866 return len; \
867 }
868 #else
869 static inline void proc_ide_create(void) { ; }
870 static inline void proc_ide_destroy(void) { ; }
871 static inline void ide_proc_register_port(ide_hwif_t *hwif) { ; }
872 static inline void ide_proc_port_register_devices(ide_hwif_t *hwif) { ; }
873 static inline void ide_proc_unregister_device(ide_drive_t *drive) { ; }
874 static inline void ide_proc_unregister_port(ide_hwif_t *hwif) { ; }
875 static inline void ide_proc_register_driver(ide_drive_t *drive, ide_driver_t *driver) { ; }
876 static inline void ide_proc_unregister_driver(ide_drive_t *drive, ide_driver_t *driver) { ; }
877 #define PROC_IDE_READ_RETURN(page,start,off,count,eof,len) return 0;
878 #endif
879
880 /*
881 * Power Management step value (rq->pm->pm_step).
882 *
883 * The step value starts at 0 (ide_pm_state_start_suspend) for a
884 * suspend operation or 1000 (ide_pm_state_start_resume) for a
885 * resume operation.
886 *
887 * For each step, the core calls the subdriver's start_power_step() first.
888 * This can return:
889 * - ide_stopped : In this case, the core calls us back again unless
890 * the step has been set to ide_pm_state_completed.
891 * - ide_started : In this case, the channel is left busy until an
892 * async event (interrupt) occurs.
893 * Typically, start_power_step() will issue a taskfile request with
894 * do_rw_taskfile().
895 *
896 * Upon reception of the interrupt, the core will call complete_power_step()
897 * with the error code if any. This routine should update the step value
898 * and return. It should not start a new request. The core will call
899 * start_power_step() for the new step value, unless the step has been
900 * set to ide_pm_state_completed.
901 *
902 * Subdrivers are expected to define their own additional power
903 * steps from 1..999 for suspend and from 1001..1999 for resume;
904 * other values are reserved for future use.
905 */
906
907 enum {
908 ide_pm_state_completed = -1,
909 ide_pm_state_start_suspend = 0,
910 ide_pm_state_start_resume = 1000,
911 };
912
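/*
 * Example (hypothetical subdriver sketch, names below are not declared in
 * this header): the protocol described above amounts to a small state
 * machine driven by the core.  One suspend step might look roughly like:
 */
#if 0
static ide_startstop_t example_start_power_step(ide_drive_t *drive,
                                                struct request *rq)
{
        ide_task_t task;

        memset(&task, 0, sizeof(task));

        switch (rq->pm->pm_step) {
        case ide_pm_state_start_suspend:
                /* issue STANDBY IMMEDIATE via a taskfile request */
                task.tf.command = ATA_CMD_STANDBYNOW1;
                task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
                return do_rw_taskfile(drive, &task);    /* -> ide_started */
        default:
                /* nothing more to do: tell the core we are done */
                rq->pm->pm_step = ide_pm_state_completed;
                return ide_stopped;
        }
}
#endif
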
913 /*
914 * Subdrivers support.
915 *
916 * The gen_driver.owner field should be set to the module owner of this driver.
917 * The gen_driver.name field should be set to the name of this driver.
918 */
919 struct ide_driver_s {
920 const char *version;
921 u8 media;
922 ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t);
923 int (*end_request)(ide_drive_t *, int, int);
924 ide_startstop_t (*error)(ide_drive_t *, struct request *rq, u8, u8);
925 struct device_driver gen_driver;
926 int (*probe)(ide_drive_t *);
927 void (*remove)(ide_drive_t *);
928 void (*resume)(ide_drive_t *);
929 void (*shutdown)(ide_drive_t *);
930 #ifdef CONFIG_IDE_PROC_FS
931 ide_proc_entry_t *proc;
932 const struct ide_devset **settings;
933 #endif
934 };
935
936 #define to_ide_driver(drv) container_of(drv, ide_driver_t, gen_driver)
937
938 int ide_device_get(ide_drive_t *);
939 void ide_device_put(ide_drive_t *);
940
941 struct ide_ioctl_devset {
942 unsigned int get_ioctl;
943 unsigned int set_ioctl;
944
945 int (*get)(ide_drive_t *);
946 int (*set)(ide_drive_t *, int);
947 };
948
949 int ide_setting_ioctl(ide_drive_t *, struct block_device *, unsigned int,
950 unsigned long, const struct ide_ioctl_devset *);
951
952 int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *,
953 unsigned, unsigned long);
954
955 extern int ide_vlb_clk;
956 extern int ide_pci_clk;
957
958 extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs);
959 int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
960 int uptodate, int nr_sectors);
961
962 extern void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler, unsigned int timeout, ide_expiry_t *expiry);
963
964 void ide_execute_command(ide_drive_t *, u8, ide_handler_t *, unsigned int,
965 ide_expiry_t *);
966
967 void ide_execute_pkt_cmd(ide_drive_t *);
968
969 void ide_pad_transfer(ide_drive_t *, int, int);
970
971 ide_startstop_t __ide_error(ide_drive_t *, struct request *, u8, u8);
972
973 ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, byte stat);
974
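/*
 * Example (illustrative sketch only): the usual "issue command, arm an IRQ
 * handler" pattern built from the helpers above:
 */
#if 0
static ide_startstop_t example_intr(ide_drive_t *drive);

static ide_startstop_t example_issue_identify(ide_drive_t *drive)
{
        /* taskfile registers are assumed to be programmed already */
        ide_execute_command(drive, ATA_CMD_ID_ATA, example_intr,
                            WAIT_WORSTCASE, NULL);
        return ide_started;
}

static ide_startstop_t example_intr(ide_drive_t *drive)
{
        u8 stat = ide_read_status(drive->hwif);

        if (!OK_STAT(stat, ATA_DRDY, BAD_STAT))
                return ide_error(drive, "example_intr", stat);

        return ide_stopped;
}
#endif
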
975 void ide_fix_driveid(u16 *);
976
977 extern void ide_fixstring(u8 *, const int, const int);
978
979 int ide_busy_sleep(ide_hwif_t *, unsigned long, int);
980
981 int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long);
982
983 extern ide_startstop_t ide_do_reset (ide_drive_t *);
984
985 extern void ide_do_drive_cmd(ide_drive_t *, struct request *);
986
987 extern void ide_end_drive_cmd(ide_drive_t *, u8, u8);
988
989 enum {
990 IDE_TFLAG_LBA48 = (1 << 0),
991 IDE_TFLAG_FLAGGED = (1 << 2),
992 IDE_TFLAG_OUT_DATA = (1 << 3),
993 IDE_TFLAG_OUT_HOB_FEATURE = (1 << 4),
994 IDE_TFLAG_OUT_HOB_NSECT = (1 << 5),
995 IDE_TFLAG_OUT_HOB_LBAL = (1 << 6),
996 IDE_TFLAG_OUT_HOB_LBAM = (1 << 7),
997 IDE_TFLAG_OUT_HOB_LBAH = (1 << 8),
998 IDE_TFLAG_OUT_HOB = IDE_TFLAG_OUT_HOB_FEATURE |
999 IDE_TFLAG_OUT_HOB_NSECT |
1000 IDE_TFLAG_OUT_HOB_LBAL |
1001 IDE_TFLAG_OUT_HOB_LBAM |
1002 IDE_TFLAG_OUT_HOB_LBAH,
1003 IDE_TFLAG_OUT_FEATURE = (1 << 9),
1004 IDE_TFLAG_OUT_NSECT = (1 << 10),
1005 IDE_TFLAG_OUT_LBAL = (1 << 11),
1006 IDE_TFLAG_OUT_LBAM = (1 << 12),
1007 IDE_TFLAG_OUT_LBAH = (1 << 13),
1008 IDE_TFLAG_OUT_TF = IDE_TFLAG_OUT_FEATURE |
1009 IDE_TFLAG_OUT_NSECT |
1010 IDE_TFLAG_OUT_LBAL |
1011 IDE_TFLAG_OUT_LBAM |
1012 IDE_TFLAG_OUT_LBAH,
1013 IDE_TFLAG_OUT_DEVICE = (1 << 14),
1014 IDE_TFLAG_WRITE = (1 << 15),
1015 IDE_TFLAG_FLAGGED_SET_IN_FLAGS = (1 << 16),
1016 IDE_TFLAG_IN_DATA = (1 << 17),
1017 IDE_TFLAG_CUSTOM_HANDLER = (1 << 18),
1018 IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 19),
1019 IDE_TFLAG_IN_HOB_FEATURE = (1 << 20),
1020 IDE_TFLAG_IN_HOB_NSECT = (1 << 21),
1021 IDE_TFLAG_IN_HOB_LBAL = (1 << 22),
1022 IDE_TFLAG_IN_HOB_LBAM = (1 << 23),
1023 IDE_TFLAG_IN_HOB_LBAH = (1 << 24),
1024 IDE_TFLAG_IN_HOB_LBA = IDE_TFLAG_IN_HOB_LBAL |
1025 IDE_TFLAG_IN_HOB_LBAM |
1026 IDE_TFLAG_IN_HOB_LBAH,
1027 IDE_TFLAG_IN_HOB = IDE_TFLAG_IN_HOB_FEATURE |
1028 IDE_TFLAG_IN_HOB_NSECT |
1029 IDE_TFLAG_IN_HOB_LBA,
1030 IDE_TFLAG_IN_FEATURE = (1 << 1),
1031 IDE_TFLAG_IN_NSECT = (1 << 25),
1032 IDE_TFLAG_IN_LBAL = (1 << 26),
1033 IDE_TFLAG_IN_LBAM = (1 << 27),
1034 IDE_TFLAG_IN_LBAH = (1 << 28),
1035 IDE_TFLAG_IN_LBA = IDE_TFLAG_IN_LBAL |
1036 IDE_TFLAG_IN_LBAM |
1037 IDE_TFLAG_IN_LBAH,
1038 IDE_TFLAG_IN_TF = IDE_TFLAG_IN_NSECT |
1039 IDE_TFLAG_IN_LBA,
1040 IDE_TFLAG_IN_DEVICE = (1 << 29),
1041 IDE_TFLAG_HOB = IDE_TFLAG_OUT_HOB |
1042 IDE_TFLAG_IN_HOB,
1043 IDE_TFLAG_TF = IDE_TFLAG_OUT_TF |
1044 IDE_TFLAG_IN_TF,
1045 IDE_TFLAG_DEVICE = IDE_TFLAG_OUT_DEVICE |
1046 IDE_TFLAG_IN_DEVICE,
1047 /* force 16-bit I/O operations */
1048 IDE_TFLAG_IO_16BIT = (1 << 30),
1049 /* ide_task_t was allocated using kmalloc() */
1050 IDE_TFLAG_DYN = (1 << 31),
1051 };
1052
1053 struct ide_taskfile {
1054 u8 hob_data; /* 0: high data byte (for TASKFILE IOCTL) */
1055
1056 u8 hob_feature; /* 1-5: additional data to support LBA48 */
1057 u8 hob_nsect;
1058 u8 hob_lbal;
1059 u8 hob_lbam;
1060 u8 hob_lbah;
1061
1062 u8 data; /* 6: low data byte (for TASKFILE IOCTL) */
1063
1064 union { /*  7: */
1065 u8 error; /* read: error */
1066 u8 feature; /* write: feature */
1067 };
1068
1069 u8 nsect; /* 8: number of sectors */
1070 u8 lbal; /* 9: LBA low */
1071 u8 lbam; /* 10: LBA mid */
1072 u8 lbah; /* 11: LBA high */
1073
1074 u8 device; /* 12: device select */
1075
1076 union { /* 13: */
1077 u8 status; /*  read: status  */
1078 u8 command; /* write: command */
1079 };
1080 };
1081
1082 typedef struct ide_task_s {
1083 union {
1084 struct ide_taskfile tf;
1085 u8 tf_array[14];
1086 };
1087 u32 tf_flags;
1088 int data_phase;
1089 struct request *rq; /* copy of request */
1090 void *special; /* valid_t generally */
1091 } ide_task_t;
1092
1093 void ide_tf_dump(const char *, struct ide_taskfile *);
1094
1095 void ide_exec_command(ide_hwif_t *, u8);
1096 u8 ide_read_status(ide_hwif_t *);
1097 u8 ide_read_altstatus(ide_hwif_t *);
1098 u8 ide_read_sff_dma_status(ide_hwif_t *);
1099
1100 void ide_set_irq(ide_hwif_t *, int);
1101
1102 void ide_tf_load(ide_drive_t *, ide_task_t *);
1103 void ide_tf_read(ide_drive_t *, ide_task_t *);
1104
1105 void ide_input_data(ide_drive_t *, struct request *, void *, unsigned int);
1106 void ide_output_data(ide_drive_t *, struct request *, void *, unsigned int);
1107
1108 int ide_io_buffers(ide_drive_t *, struct ide_atapi_pc *, unsigned int, int);
1109
1110 extern void SELECT_DRIVE(ide_drive_t *);
1111 void SELECT_MASK(ide_drive_t *, int);
1112
1113 u8 ide_read_error(ide_drive_t *);
1114 void ide_read_bcount_and_ireason(ide_drive_t *, u16 *, u8 *);
1115
1116 extern int drive_is_ready(ide_drive_t *);
1117
1118 void ide_pktcmd_tf_load(ide_drive_t *, u32, u16, u8);
1119
1120 int ide_check_atapi_device(ide_drive_t *, const char *);
1121
1122 void ide_init_pc(struct ide_atapi_pc *);
1123
1124 /*
1125 * Special requests for ide-tape block device strategy routine.
1126 *
1127 * In order to service a character device command, we add special requests to
1128 * the tail of our block device request queue and wait for their completion.
1129 */
1130 enum {
1131 REQ_IDETAPE_PC1 = (1 << 0), /* packet command (first stage) */
1132 REQ_IDETAPE_PC2 = (1 << 1), /* packet command (second stage) */
1133 REQ_IDETAPE_READ = (1 << 2),
1134 REQ_IDETAPE_WRITE = (1 << 3),
1135 };
1136
1137 void ide_queue_pc_head(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *,
1138 struct request *);
1139 int ide_queue_pc_tail(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *);
1140
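/*
 * Example (illustrative sketch only): with ide_queue_pc_tail() a subdriver
 * can issue a packet command synchronously from process context, e.g. a
 * TEST UNIT READY:
 */
#if 0
static int example_test_unit_ready(ide_drive_t *drive, struct gendisk *disk)
{
        struct ide_atapi_pc pc;

        ide_init_pc(&pc);
        pc.c[0] = 0x00; /* TEST UNIT READY */

        /* queued at the tail of the request queue; sleeps until completed */
        return ide_queue_pc_tail(drive, disk, &pc);
}
#endif
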
1141 ide_startstop_t ide_pc_intr(ide_drive_t *drive, struct ide_atapi_pc *pc,
1142 ide_handler_t *handler, unsigned int timeout, ide_expiry_t *expiry,
1143 void (*update_buffers)(ide_drive_t *, struct ide_atapi_pc *),
1144 void (*retry_pc)(ide_drive_t *), void (*dsc_handle)(ide_drive_t *),
1145 int (*io_buffers)(ide_drive_t *, struct ide_atapi_pc *, unsigned int,
1146 int));
1147 ide_startstop_t ide_transfer_pc(ide_drive_t *, struct ide_atapi_pc *,
1148 ide_handler_t *, unsigned int, ide_expiry_t *);
1149 ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_atapi_pc *,
1150 ide_handler_t *, unsigned int, ide_expiry_t *);
1151
1152 ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *);
1153
1154 void task_end_request(ide_drive_t *, struct request *, u8);
1155
1156 int ide_raw_taskfile(ide_drive_t *, ide_task_t *, u8 *, u16);
1157 int ide_no_data_taskfile(ide_drive_t *, ide_task_t *);
1158
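/*
 * Example (illustrative sketch only): issuing a simple non-data command
 * through the taskfile interface above (here ATA FLUSH CACHE):
 */
#if 0
static int example_flush_cache(ide_drive_t *drive)
{
        ide_task_t task;

        memset(&task, 0, sizeof(task));
        task.tf.command = ATA_CMD_FLUSH;
        task.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE;

        /* blocks until the command completes; returns 0 on success */
        return ide_no_data_taskfile(drive, &task);
}
#endif
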
1159 int ide_taskfile_ioctl(ide_drive_t *, unsigned int, unsigned long);
1160
1161 extern int ide_driveid_update(ide_drive_t *);
1162 extern int ide_config_drive_speed(ide_drive_t *, u8);
1163 extern u8 eighty_ninty_three (ide_drive_t *);
1164 extern int taskfile_lib_get_identify(ide_drive_t *drive, u8 *);
1165
1166 extern int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout);
1167
1168 extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
1169
1170 extern int ide_spin_wait_hwgroup(ide_drive_t *);
1171 extern void ide_timer_expiry(unsigned long);
1172 extern irqreturn_t ide_intr(int irq, void *dev_id);
1173 extern void do_ide_request(struct request_queue *);
1174
1175 void ide_init_disk(struct gendisk *, ide_drive_t *);
1176
1177 #ifdef CONFIG_IDEPCI_PCIBUS_ORDER
1178 extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *owner, const char *mod_name);
1179 #define ide_pci_register_driver(d) __ide_pci_register_driver(d, THIS_MODULE, KBUILD_MODNAME)
1180 #else
1181 #define ide_pci_register_driver(d) pci_register_driver(d)
1182 #endif
1183
1184 void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, int,
1185 hw_regs_t *, hw_regs_t **);
1186 void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);
1187
1188 #ifdef CONFIG_BLK_DEV_IDEDMA_PCI
1189 int ide_pci_set_master(struct pci_dev *, const char *);
1190 unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *);
1191 int ide_pci_check_simplex(ide_hwif_t *, const struct ide_port_info *);
1192 int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *);
1193 #else
1194 static inline int ide_hwif_setup_dma(ide_hwif_t *hwif,
1195 const struct ide_port_info *d)
1196 {
1197 return -EINVAL;
1198 }
1199 #endif
1200
1201 typedef struct ide_pci_enablebit_s {
1202 u8 reg; /* byte pci reg holding the enable-bit */
1203 u8 mask; /* mask to isolate the enable-bit */
1204 u8 val; /* value of masked reg when "enabled" */
1205 } ide_pci_enablebit_t;
1206
1207 enum {
1208 /* Uses ISA control ports not PCI ones. */
1209 IDE_HFLAG_ISA_PORTS = (1 << 0),
1210 /* single port device */
1211 IDE_HFLAG_SINGLE = (1 << 1),
1212 /* don't use legacy PIO blacklist */
1213 IDE_HFLAG_PIO_NO_BLACKLIST = (1 << 2),
1214 /* set for the second port of QD65xx */
1215 IDE_HFLAG_QD_2ND_PORT = (1 << 3),
1216 /* use PIO8/9 for prefetch off/on */
1217 IDE_HFLAG_ABUSE_PREFETCH = (1 << 4),
1218 /* use PIO6/7 for fast-devsel off/on */
1219 IDE_HFLAG_ABUSE_FAST_DEVSEL = (1 << 5),
1220 /* use 100-102 and 200-202 PIO values to set DMA modes */
1221 IDE_HFLAG_ABUSE_DMA_MODES = (1 << 6),
1222 /*
1223 * keep DMA setting when programming PIO mode, may be used only
1224 * for hosts which have separate PIO and DMA timings (ie. PMAC)
1225 */
1226 IDE_HFLAG_SET_PIO_MODE_KEEP_DMA = (1 << 7),
1227 /* program host for the transfer mode after programming device */
1228 IDE_HFLAG_POST_SET_MODE = (1 << 8),
1229 /* don't program host/device for the transfer mode ("smart" hosts) */
1230 IDE_HFLAG_NO_SET_MODE = (1 << 9),
1231 /* trust BIOS for programming chipset/device for DMA */
1232 IDE_HFLAG_TRUST_BIOS_FOR_DMA = (1 << 10),
1233 /* host is CS5510/CS5520 */
1234 IDE_HFLAG_CS5520 = (1 << 11),
1235 /* ATAPI DMA is unsupported */
1236 IDE_HFLAG_NO_ATAPI_DMA = (1 << 12),
1237 /* set if host is a "non-bootable" controller */
1238 IDE_HFLAG_NON_BOOTABLE = (1 << 13),
1239 /* host doesn't support DMA */
1240 IDE_HFLAG_NO_DMA = (1 << 14),
1241 /* check if host is PCI IDE device before allowing DMA */
1242 IDE_HFLAG_NO_AUTODMA = (1 << 15),
1243 /* host uses MMIO */
1244 IDE_HFLAG_MMIO = (1 << 16),
1245 /* no LBA48 */
1246 IDE_HFLAG_NO_LBA48 = (1 << 17),
1247 /* no LBA48 DMA */
1248 IDE_HFLAG_NO_LBA48_DMA = (1 << 18),
1249 /* data FIFO is cleared by an error */
1250 IDE_HFLAG_ERROR_STOPS_FIFO = (1 << 19),
1251 /* serialize ports */
1252 IDE_HFLAG_SERIALIZE = (1 << 20),
1253 /* use legacy IRQs */
1254 IDE_HFLAG_LEGACY_IRQS = (1 << 21),
1255 /* force use of legacy IRQs */
1256 IDE_HFLAG_FORCE_LEGACY_IRQS = (1 << 22),
1257 /* limit LBA48 requests to 256 sectors */
1258 IDE_HFLAG_RQSIZE_256 = (1 << 23),
1259 /* use 32-bit I/O ops */
1260 IDE_HFLAG_IO_32BIT = (1 << 24),
1261 /* unmask IRQs */
1262 IDE_HFLAG_UNMASK_IRQS = (1 << 25),
1263 /* serialize ports if DMA is possible (for sl82c105) */
1264 IDE_HFLAG_SERIALIZE_DMA = (1 << 27),
1265 /* force host out of "simplex" mode */
1266 IDE_HFLAG_CLEAR_SIMPLEX = (1 << 28),
1267 /* DSC overlap is unsupported */
1268 IDE_HFLAG_NO_DSC = (1 << 29),
1269 /* never use 32-bit I/O ops */
1270 IDE_HFLAG_NO_IO_32BIT = (1 << 30),
1271 /* never unmask IRQs */
1272 IDE_HFLAG_NO_UNMASK_IRQS = (1 << 31),
1273 };
1274
1275 #ifdef CONFIG_BLK_DEV_OFFBOARD
1276 # define IDE_HFLAG_OFF_BOARD 0
1277 #else
1278 # define IDE_HFLAG_OFF_BOARD IDE_HFLAG_NON_BOOTABLE
1279 #endif
1280
1281 struct ide_port_info {
1282 char *name;
1283 unsigned int (*init_chipset)(struct pci_dev *);
1284 void (*init_iops)(ide_hwif_t *);
1285 void (*init_hwif)(ide_hwif_t *);
1286 int (*init_dma)(ide_hwif_t *,
1287 const struct ide_port_info *);
1288
1289 const struct ide_tp_ops *tp_ops;
1290 const struct ide_port_ops *port_ops;
1291 const struct ide_dma_ops *dma_ops;
1292
1293 ide_pci_enablebit_t enablebits[2];
1294 hwif_chipset_t chipset;
1295 u32 host_flags;
1296 u8 pio_mask;
1297 u8 swdma_mask;
1298 u8 mwdma_mask;
1299 u8 udma_mask;
1300 };
1301
1302 int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *);
1303 int ide_pci_init_two(struct pci_dev *, struct pci_dev *,
1304 const struct ide_port_info *, void *);
1305 void ide_pci_remove(struct pci_dev *);
1306
1307 #ifdef CONFIG_PM
1308 int ide_pci_suspend(struct pci_dev *, pm_message_t);
1309 int ide_pci_resume(struct pci_dev *);
1310 #else
1311 #define ide_pci_suspend NULL
1312 #define ide_pci_resume NULL
1313 #endif
1314
1315 void ide_map_sg(ide_drive_t *, struct request *);
1316 void ide_init_sg_cmd(ide_drive_t *, struct request *);
1317
1318 #define BAD_DMA_DRIVE 0
1319 #define GOOD_DMA_DRIVE 1
1320
1321 struct drive_list_entry {
1322 const char *id_model;
1323 const char *id_firmware;
1324 };
1325
1326 int ide_in_drive_list(u16 *, const struct drive_list_entry *);
1327
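/*
 * Example (illustrative sketch only, made-up model strings): quirk tables
 * handed to ide_in_drive_list() are NULL-terminated model/firmware pairs;
 * a NULL id_firmware is taken to match any firmware revision:
 */
#if 0
static const struct drive_list_entry example_nodma_list[] = {
        { "EXAMPLE DRIVE 1234", "REV 1.0" },
        { "ANOTHER EXAMPLE",    NULL      },    /* any firmware */
        { NULL,                 NULL      }
};
#endif
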
1328 #ifdef CONFIG_BLK_DEV_IDEDMA
1329 int __ide_dma_bad_drive(ide_drive_t *);
1330 int ide_id_dma_bug(ide_drive_t *);
1331
1332 u8 ide_find_dma_mode(ide_drive_t *, u8);
1333
1334 static inline u8 ide_max_dma_mode(ide_drive_t *drive)
1335 {
1336 return ide_find_dma_mode(drive, XFER_UDMA_6);
1337 }
1338
1339 void ide_dma_off_quietly(ide_drive_t *);
1340 void ide_dma_off(ide_drive_t *);
1341 void ide_dma_on(ide_drive_t *);
1342 int ide_set_dma(ide_drive_t *);
1343 void ide_check_dma_crc(ide_drive_t *);
1344 ide_startstop_t ide_dma_intr(ide_drive_t *);
1345
1346 int ide_build_sglist(ide_drive_t *, struct request *);
1347 void ide_destroy_dmatable(ide_drive_t *);
1348
1349 #ifdef CONFIG_BLK_DEV_IDEDMA_SFF
1350 extern int ide_build_dmatable(ide_drive_t *, struct request *);
1351 int ide_allocate_dma_engine(ide_hwif_t *);
1352 void ide_release_dma_engine(ide_hwif_t *);
1353
1354 void ide_dma_host_set(ide_drive_t *, int);
1355 extern int ide_dma_setup(ide_drive_t *);
1356 void ide_dma_exec_cmd(ide_drive_t *, u8);
1357 extern void ide_dma_start(ide_drive_t *);
1358 extern int __ide_dma_end(ide_drive_t *);
1359 int ide_dma_test_irq(ide_drive_t *);
1360 extern void ide_dma_lost_irq(ide_drive_t *);
1361 extern void ide_dma_timeout(ide_drive_t *);
1362 extern const struct ide_dma_ops sff_dma_ops;
1363 #endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
1364
1365 #else
1366 static inline int ide_id_dma_bug(ide_drive_t *drive) { return 0; }
1367 static inline u8 ide_find_dma_mode(ide_drive_t *drive, u8 speed) { return 0; }
1368 static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; }
1369 static inline void ide_dma_off_quietly(ide_drive_t *drive) { ; }
1370 static inline void ide_dma_off(ide_drive_t *drive) { ; }
1371 static inline void ide_dma_on(ide_drive_t *drive) { ; }
1372 static inline void ide_dma_verbose(ide_drive_t *drive) { ; }
1373 static inline int ide_set_dma(ide_drive_t *drive) { return 1; }
1374 static inline void ide_check_dma_crc(ide_drive_t *drive) { ; }
1375 #endif /* CONFIG_BLK_DEV_IDEDMA */
1376
1377 #ifndef CONFIG_BLK_DEV_IDEDMA_SFF
1378 static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; }
1379 #endif
1380
1381 #ifdef CONFIG_BLK_DEV_IDEACPI
1382 extern int ide_acpi_exec_tfs(ide_drive_t *drive);
1383 extern void ide_acpi_get_timing(ide_hwif_t *hwif);
1384 extern void ide_acpi_push_timing(ide_hwif_t *hwif);
1385 extern void ide_acpi_init(ide_hwif_t *hwif);
1386 void ide_acpi_port_init_devices(ide_hwif_t *);
1387 extern void ide_acpi_set_state(ide_hwif_t *hwif, int on);
1388 #else
1389 static inline int ide_acpi_exec_tfs(ide_drive_t *drive) { return 0; }
1390 static inline void ide_acpi_get_timing(ide_hwif_t *hwif) { ; }
1391 static inline void ide_acpi_push_timing(ide_hwif_t *hwif) { ; }
1392 static inline void ide_acpi_init(ide_hwif_t *hwif) { ; }
1393 static inline void ide_acpi_port_init_devices(ide_hwif_t *hwif) { ; }
1394 static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {}
1395 #endif
1396
1397 void ide_remove_port_from_hwgroup(ide_hwif_t *);
1398 void ide_unregister(ide_hwif_t *);
1399
1400 void ide_register_region(struct gendisk *);
1401 void ide_unregister_region(struct gendisk *);
1402
1403 void ide_undecoded_slave(ide_drive_t *);
1404
1405 void ide_port_apply_params(ide_hwif_t *);
1406
1407 struct ide_host *ide_host_alloc_all(const struct ide_port_info *, hw_regs_t **);
1408 struct ide_host *ide_host_alloc(const struct ide_port_info *, hw_regs_t **);
1409 void ide_host_free(struct ide_host *);
1410 int ide_host_register(struct ide_host *, const struct ide_port_info *,
1411 hw_regs_t **);
1412 int ide_host_add(const struct ide_port_info *, hw_regs_t **,
1413 struct ide_host **);
1414 void ide_host_remove(struct ide_host *);
1415 int ide_legacy_device_add(const struct ide_port_info *, unsigned long);
1416 void ide_port_unregister_devices(ide_hwif_t *);
1417 void ide_port_scan(ide_hwif_t *);
1418
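/*
 * Example (hypothetical host driver sketch): a minimal probe built on the
 * registration helpers above boils down to filling a hw_regs_t and calling
 * ide_host_add():
 */
#if 0
static const struct ide_port_info example_port_info = {
        .name           = "example",
        .host_flags     = IDE_HFLAG_NO_DMA,
};

static int __init example_probe(void)
{
        struct ide_host *host;
        hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };

        memset(&hw, 0, sizeof(hw));
        ide_std_init_ports(&hw, 0x1f0, 0x3f6);
        hw.irq = 14;
        hw.chipset = ide_generic;

        return ide_host_add(&example_port_info, hws, &host);
}
#endif
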
1419 static inline void *ide_get_hwifdata (ide_hwif_t * hwif)
1420 {
1421 return hwif->hwif_data;
1422 }
1423
1424 static inline void ide_set_hwifdata (ide_hwif_t * hwif, void *data)
1425 {
1426 hwif->hwif_data = data;
1427 }
1428
1429 const char *ide_xfer_verbose(u8 mode);
1430 extern void ide_toggle_bounce(ide_drive_t *drive, int on);
1431 extern int ide_set_xfer_rate(ide_drive_t *drive, u8 rate);
1432
1433 u64 ide_get_lba_addr(struct ide_taskfile *, int);
1434 u8 ide_dump_status(ide_drive_t *, const char *, u8);
1435
1436 struct ide_timing {
1437 u8 mode;
1438 u8 setup; /* t1 */
1439 u16 act8b; /* t2 for 8-bit io */
1440 u16 rec8b; /* t2i for 8-bit io */
1441 u16 cyc8b; /* t0 for 8-bit io */
1442 u16 active; /* t2 or tD */
1443 u16 recover; /* t2i or tK */
1444 u16 cycle; /* t0 */
1445 u16 udma; /* t2CYCTYP/2 */
1446 };
1447
1448 enum {
1449 IDE_TIMING_SETUP = (1 << 0),
1450 IDE_TIMING_ACT8B = (1 << 1),
1451 IDE_TIMING_REC8B = (1 << 2),
1452 IDE_TIMING_CYC8B = (1 << 3),
1453 IDE_TIMING_8BIT = IDE_TIMING_ACT8B | IDE_TIMING_REC8B |
1454 IDE_TIMING_CYC8B,
1455 IDE_TIMING_ACTIVE = (1 << 4),
1456 IDE_TIMING_RECOVER = (1 << 5),
1457 IDE_TIMING_CYCLE = (1 << 6),
1458 IDE_TIMING_UDMA = (1 << 7),
1459 IDE_TIMING_ALL = IDE_TIMING_SETUP | IDE_TIMING_8BIT |
1460 IDE_TIMING_ACTIVE | IDE_TIMING_RECOVER |
1461 IDE_TIMING_CYCLE | IDE_TIMING_UDMA,
1462 };
1463
1464 struct ide_timing *ide_timing_find_mode(u8);
1465 u16 ide_pio_cycle_time(ide_drive_t *, u8);
1466 void ide_timing_merge(struct ide_timing *, struct ide_timing *,
1467 struct ide_timing *, unsigned int);
1468 int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int);
1469
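/*
 * Example (illustrative sketch only; the 30 ns clock is made up): a host
 * driver's set_pio_mode()/set_dma_mode() hook can turn a transfer mode into
 * chipset timings with the helpers above:
 */
#if 0
static void example_program_timings(ide_drive_t *drive, u8 speed)
{
        struct ide_timing t;

        /* quantize the generic timings to a hypothetical 30 ns bus clock */
        if (ide_timing_compute(drive, speed, &t, 30, 30) == 0) {
                /* ... write t.active, t.recover, t.cycle to the chipset ... */
        }
}
#endif
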
1470 int ide_scan_pio_blacklist(char *);
1471
1472 u8 ide_get_best_pio_mode(ide_drive_t *, u8, u8);
1473
1474 int ide_set_pio_mode(ide_drive_t *, u8);
1475 int ide_set_dma_mode(ide_drive_t *, u8);
1476
1477 void ide_set_pio(ide_drive_t *, u8);
1478
1479 static inline void ide_set_max_pio(ide_drive_t *drive)
1480 {
1481 ide_set_pio(drive, 255);
1482 }
1483
1484 extern spinlock_t ide_lock;
1485 extern struct mutex ide_cfg_mtx;
1486 /*
1487 * Structure locking:
1488 *
1489 * ide_cfg_mtx and ide_lock together protect changes to
1490 * ide_hwif_t->{next,hwgroup}
1491 * ide_drive_t->next
1492 *
1493 * ide_hwgroup_t->busy: ide_lock
1494 * ide_hwgroup_t->hwif: ide_lock
1495 * ide_hwif_t->mate: constant, no locking
1496 * ide_drive_t->hwif: constant, no locking
1497 */
1498
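/*
 * Example (illustrative sketch only): code inspecting the fields listed
 * above takes ide_lock around the access, e.g.:
 */
#if 0
static int example_hwgroup_busy(ide_hwgroup_t *hwgroup)
{
        unsigned long flags;
        int busy;

        spin_lock_irqsave(&ide_lock, flags);
        busy = hwgroup->busy;
        spin_unlock_irqrestore(&ide_lock, flags);

        return busy;
}
#endif
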
1499 #define local_irq_set(flags) do { local_save_flags((flags)); local_irq_enable_in_hardirq(); } while (0)
1500
1501 extern struct bus_type ide_bus_type;
1502 extern struct class *ide_port_class;
1503
1504 static inline void ide_dump_identify(u8 *id)
1505 {
1506 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 2, id, 512, 0);
1507 }
1508
1509 static inline int hwif_to_node(ide_hwif_t *hwif)
1510 {
1511 return hwif->dev ? dev_to_node(hwif->dev) : -1;
1512 }
1513
1514 static inline ide_drive_t *ide_get_pair_dev(ide_drive_t *drive)
1515 {
1516 ide_drive_t *peer = &drive->hwif->drives[(drive->dn ^ 1) & 1];
1517
1518 return peer->present ? peer : NULL;
1519 }
1520 #endif /* _IDE_H */