1 #ifndef _IDE_H
2 #define _IDE_H
3 /*
4 * linux/include/linux/ide.h
5 *
6 * Copyright (C) 1994-2002 Linus Torvalds & authors
7 */
8
9 #include <linux/init.h>
10 #include <linux/ioport.h>
11 #include <linux/ata.h>
12 #include <linux/blkdev.h>
13 #include <linux/proc_fs.h>
14 #include <linux/interrupt.h>
15 #include <linux/bitops.h>
16 #include <linux/bio.h>
17 #include <linux/pci.h>
18 #include <linux/completion.h>
19 #include <linux/pm.h>
20 #include <linux/mutex.h>
21 #ifdef CONFIG_BLK_DEV_IDEACPI
22 #include <acpi/acpi.h>
23 #endif
24 #include <asm/byteorder.h>
25 #include <asm/io.h>
26
27 /* for request_sense */
28 #include <linux/cdrom.h>
29
30 #if defined(CONFIG_CRIS) || defined(CONFIG_FRV) || defined(CONFIG_MN10300)
31 # define SUPPORT_VLB_SYNC 0
32 #else
33 # define SUPPORT_VLB_SYNC 1
34 #endif
35
36 /*
37 * Probably not wise to fiddle with these
38 */
39 #define IDE_DEFAULT_MAX_FAILURES 1
40 #define ERROR_MAX 8 /* Max read/write errors per sector */
41 #define ERROR_RESET 3 /* Reset controller every 4th retry */
42 #define ERROR_RECAL 1 /* Recalibrate every 2nd retry */
43
44 struct device;
45
46 /* Error codes returned in rq->errors to the higher part of the driver. */
47 enum {
48 IDE_DRV_ERROR_GENERAL = 101,
49 IDE_DRV_ERROR_FILEMARK = 102,
50 IDE_DRV_ERROR_EOD = 103,
51 };
52
53 /*
54 * Definitions for accessing IDE controller registers
55 */
56 #define IDE_NR_PORTS (10)
57
58 struct ide_io_ports {
59 unsigned long data_addr;
60
61 union {
62 unsigned long error_addr; /* read: error */
63 unsigned long feature_addr; /* write: feature */
64 };
65
66 unsigned long nsect_addr;
67 unsigned long lbal_addr;
68 unsigned long lbam_addr;
69 unsigned long lbah_addr;
70
71 unsigned long device_addr;
72
73 union {
74 unsigned long status_addr; /*  read: status  */
75 unsigned long command_addr; /* write: command */
76 };
77
78 unsigned long ctl_addr;
79
80 unsigned long irq_addr;
81 };
82
83 #define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good))
84
85 #define BAD_R_STAT (ATA_BUSY | ATA_ERR)
86 #define BAD_W_STAT (BAD_R_STAT | ATA_DF)
87 #define BAD_STAT (BAD_R_STAT | ATA_DRQ)
88 #define DRIVE_READY (ATA_DRDY | ATA_DSC)
89
90 #define BAD_CRC (ATA_ABORTED | ATA_ICRC)
91
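/*
 * Editor's illustrative sketch (not part of the original header): OK_STAT()
 * tests that every "good" bit is set and every "bad" bit is clear in a
 * status byte.  A minimal usage example, assuming the status was just read
 * through the port's tp_ops:
 */
#if 0
u8 stat = hwif->tp_ops->read_status(hwif);

if (OK_STAT(stat, ATA_DRDY, BAD_R_STAT)) {
	/* drive ready, neither BSY nor ERR reported */
} else if (stat & ATA_ERR) {
	/* consult the error register for the cause */
}
#endif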
92 #define SATA_NR_PORTS (3) /* 16 possible ?? */
93
94 #define SATA_STATUS_OFFSET (0)
95 #define SATA_ERROR_OFFSET (1)
96 #define SATA_CONTROL_OFFSET (2)
97
98 /*
99 * Our Physical Region Descriptor (PRD) table should be large enough
100 * to handle the biggest I/O request we are likely to see. Since requests
101 * can have no more than 256 sectors, and since the typical blocksize is
102 * two or more sectors, we could get by with a limit of 128 entries here for
103 * the usual worst case. Most requests seem to include some contiguous blocks,
104 * further reducing the number of table entries required.
105 *
106 * The driver reverts to PIO mode for individual requests that exceed
107 * this limit (possible with 512 byte blocksizes, eg. MSDOS f/s), so handling
108 * 100% of all crazy scenarios here is not necessary.
109 *
110 * As it turns out though, we must allocate a full 4KB page for this,
111 * so the two PRD tables (ide0 & ide1) will each get half of that,
112 * allowing each to have about 256 entries (8 bytes each) from this.
113 */
114 #define PRD_BYTES 8
115 #define PRD_ENTRIES 256
116
117 /*
118 * Some more useful definitions
119 */
120 #define PARTN_BITS 6 /* number of minor dev bits for partitions */
121 #define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */
122 #define SECTOR_SIZE 512
123
124 /*
125 * Timeouts for various operations:
126 */
127 enum {
128 /* spec allows up to 20ms, but CF cards and SSD drives need more */
129 WAIT_DRQ = 1 * HZ, /* 1s */
130 /* some laptops are very slow */
131 WAIT_READY = 5 * HZ, /* 5s */
132 /* should be less than 3ms (?); assumes all ATAPI CD trays are closed at boot */
133 WAIT_PIDENTIFY = 10 * HZ, /* 10s */
134 /* worst case when spinning up */
135 WAIT_WORSTCASE = 30 * HZ, /* 30s */
136 /* maximum wait for an IRQ to happen */
137 WAIT_CMD = 10 * HZ, /* 10s */
138 /* Some drives require a longer IRQ timeout. */
139 WAIT_FLOPPY_CMD = 50 * HZ, /* 50s */
140 /*
141 * Some drives (for example, Seagate STT3401A Travan) require a very
142 * long timeout, because they don't return an interrupt or clear their
143 * BSY bit until after the command completes (even retension commands).
144 */
145 WAIT_TAPE_CMD = 900 * HZ, /* 900s */
146 /* minimum sleep time */
147 WAIT_MIN_SLEEP = HZ / 50, /* 20ms */
148 };
149
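/*
 * Editor's illustrative sketch (not part of the original header): these
 * timeouts are passed to status-polling helpers such as ide_wait_stat()
 * (declared further down).  For example, waiting for DRQ after issuing a
 * PIO write command:
 */
#if 0
ide_startstop_t startstop;

if (ide_wait_stat(&startstop, drive, ATA_DRQ, BAD_W_STAT, WAIT_DRQ))
	return startstop;	/* DRQ did not assert within 1 second */
#endif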
150 /*
151 * Op codes for special requests to be handled by ide_special_rq().
152 * Values should be in the range of 0x20 to 0x3f.
153 */
154 #define REQ_DRIVE_RESET 0x20
155 #define REQ_DEVSET_EXEC 0x21
156 #define REQ_PARK_HEADS 0x22
157 #define REQ_UNPARK_HEADS 0x23
158
159 /*
160 * hwif_chipset_t is used to keep track of the specific hardware
161 * chipset used by each IDE interface, if known.
162 */
163 enum { ide_unknown, ide_generic, ide_pci,
164 ide_cmd640, ide_dtc2278, ide_ali14xx,
165 ide_qd65xx, ide_umc8672, ide_ht6560b,
166 ide_4drives, ide_pmac, ide_acorn,
167 ide_au1xxx, ide_palm3710
168 };
169
170 typedef u8 hwif_chipset_t;
171
172 /*
173 * Structure to hold all information about the location of this port
174 */
175 struct ide_hw {
176 union {
177 struct ide_io_ports io_ports;
178 unsigned long io_ports_array[IDE_NR_PORTS];
179 };
180
181 int irq; /* our irq number */
182 struct device *dev, *parent;
183 unsigned long config;
184 };
185
186 static inline void ide_std_init_ports(struct ide_hw *hw,
187 unsigned long io_addr,
188 unsigned long ctl_addr)
189 {
190 unsigned int i;
191
192 for (i = 0; i <= 7; i++)
193 hw->io_ports_array[i] = io_addr++;
194
195 hw->io_ports.ctl_addr = ctl_addr;
196 }
197
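/*
 * Editor's illustrative sketch (not part of the original header): a legacy
 * host driver typically fills a struct ide_hw via ide_std_init_ports() and
 * registers it with ide_host_add() (declared further down).  The 0x1f0/0x3f6
 * base and IRQ 14 are the classic primary-channel values; "my_port_info" is
 * a placeholder for the driver's own struct ide_port_info.
 */
#if 0
struct ide_hw hw, *hws[] = { &hw };

memset(&hw, 0, sizeof(hw));
ide_std_init_ports(&hw, 0x1f0, 0x3f6);
hw.irq = 14;

rc = ide_host_add(&my_port_info, hws, 1, NULL);
#endif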
198 #define MAX_HWIFS 10
199
200 /*
201 * Now for the data we need to maintain per-drive: ide_drive_t
202 */
203
204 #define ide_scsi 0x21
205 #define ide_disk 0x20
206 #define ide_optical 0x7
207 #define ide_cdrom 0x5
208 #define ide_tape 0x1
209 #define ide_floppy 0x0
210
211 /*
212 * Special Driver Flags
213 */
214 enum {
215 IDE_SFLAG_SET_GEOMETRY = (1 << 0),
216 IDE_SFLAG_RECALIBRATE = (1 << 1),
217 IDE_SFLAG_SET_MULTMODE = (1 << 2),
218 };
219
220 /*
221 * Status returned from various ide_ functions
222 */
223 typedef enum {
224 ide_stopped, /* no drive operation was started */
225 ide_started, /* a drive operation was started, handler was set */
226 } ide_startstop_t;
227
228 enum {
229 IDE_VALID_ERROR = (1 << 1),
230 IDE_VALID_FEATURE = IDE_VALID_ERROR,
231 IDE_VALID_NSECT = (1 << 2),
232 IDE_VALID_LBAL = (1 << 3),
233 IDE_VALID_LBAM = (1 << 4),
234 IDE_VALID_LBAH = (1 << 5),
235 IDE_VALID_DEVICE = (1 << 6),
236 IDE_VALID_LBA = IDE_VALID_LBAL |
237 IDE_VALID_LBAM |
238 IDE_VALID_LBAH,
239 IDE_VALID_OUT_TF = IDE_VALID_FEATURE |
240 IDE_VALID_NSECT |
241 IDE_VALID_LBA,
242 IDE_VALID_IN_TF = IDE_VALID_NSECT |
243 IDE_VALID_LBA,
244 IDE_VALID_OUT_HOB = IDE_VALID_OUT_TF,
245 IDE_VALID_IN_HOB = IDE_VALID_ERROR |
246 IDE_VALID_NSECT |
247 IDE_VALID_LBA,
248 };
249
250 enum {
251 IDE_TFLAG_LBA48 = (1 << 0),
252 IDE_TFLAG_WRITE = (1 << 1),
253 IDE_TFLAG_CUSTOM_HANDLER = (1 << 2),
254 IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 3),
255 /* force 16-bit I/O operations */
256 IDE_TFLAG_IO_16BIT = (1 << 4),
257 /* struct ide_cmd was allocated using kmalloc() */
258 IDE_TFLAG_DYN = (1 << 5),
259 IDE_TFLAG_FS = (1 << 6),
260 IDE_TFLAG_MULTI_PIO = (1 << 7),
261 IDE_TFLAG_SET_XFER = (1 << 8),
262 };
263
264 enum {
265 IDE_FTFLAG_FLAGGED = (1 << 0),
266 IDE_FTFLAG_SET_IN_FLAGS = (1 << 1),
267 IDE_FTFLAG_OUT_DATA = (1 << 2),
268 IDE_FTFLAG_IN_DATA = (1 << 3),
269 };
270
271 struct ide_taskfile {
272 u8 data; /* 0: data byte (for TASKFILE ioctl) */
273 union { /* 1: */
274 u8 error; /* read: error */
275 u8 feature; /* write: feature */
276 };
277 u8 nsect; /* 2: number of sectors */
278 u8 lbal; /* 3: LBA low */
279 u8 lbam; /* 4: LBA mid */
280 u8 lbah; /* 5: LBA high */
281 u8 device; /* 6: device select */
282 union { /* 7: */
283 u8 status; /* read: status */
284 u8 command; /* write: command */
285 };
286 };
287
288 struct ide_cmd {
289 struct ide_taskfile tf;
290 struct ide_taskfile hob;
291 struct {
292 struct {
293 u8 tf;
294 u8 hob;
295 } out, in;
296 } valid;
297
298 u16 tf_flags;
299 u8 ftf_flags; /* for TASKFILE ioctl */
300 int protocol;
301
302 int sg_nents; /* number of sg entries */
303 int orig_sg_nents;
304 int sg_dma_direction; /* DMA transfer direction */
305
306 unsigned int nbytes;
307 unsigned int nleft;
308 unsigned int last_xfer_len;
309
310 struct scatterlist *cursg;
311 unsigned int cursg_ofs;
312
313 struct request *rq; /* copy of request */
314 };
315
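/*
 * Editor's illustrative sketch (not part of the original header): a no-data
 * taskfile command is built by filling tf and the valid masks, then handed
 * to ide_no_data_taskfile() (declared further down).  This is modelled on
 * the SET FEATURES / transfer-mode sequence; ATA_CMD_SET_FEATURES and
 * SETFEATURES_XFER come from <linux/ata.h>:
 */
#if 0
struct ide_cmd cmd;

memset(&cmd, 0, sizeof(cmd));
cmd.tf.feature = SETFEATURES_XFER;
cmd.tf.nsect   = mode;			/* e.g. XFER_PIO_4 */
cmd.tf.command = ATA_CMD_SET_FEATURES;
cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
cmd.valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;

return ide_no_data_taskfile(drive, &cmd);
#endif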
316 /* ATAPI packet command flags */
317 enum {
318 /* set when an error is considered normal - no retry (ide-tape) */
319 PC_FLAG_ABORT = (1 << 0),
320 PC_FLAG_SUPPRESS_ERROR = (1 << 1),
321 PC_FLAG_WAIT_FOR_DSC = (1 << 2),
322 PC_FLAG_DMA_OK = (1 << 3),
323 PC_FLAG_DMA_IN_PROGRESS = (1 << 4),
324 PC_FLAG_DMA_ERROR = (1 << 5),
325 PC_FLAG_WRITING = (1 << 6),
326 };
327
328 #define ATAPI_WAIT_PC (60 * HZ)
329
330 struct ide_atapi_pc {
331 /* actual packet bytes */
332 u8 c[12];
333 /* incremented on each retry */
334 int retries;
335 int error;
336
337 /* bytes to transfer */
338 int req_xfer;
339
340 /* the corresponding request */
341 struct request *rq;
342
343 unsigned long flags;
344
345 /*
346 * those are more or less driver-specific and some of them are subject
347 * to change/removal later.
348 */
349 unsigned long timeout;
350 };
351
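/*
 * Editor's illustrative sketch (not part of the original header): a packet
 * command is normally prepared with ide_init_pc() and queued with
 * ide_queue_pc_tail() (both declared further down).  GPCMD_TEST_UNIT_READY
 * comes from <linux/cdrom.h>, which is already included above:
 */
#if 0
struct ide_atapi_pc pc;

ide_init_pc(&pc);
pc.c[0] = GPCMD_TEST_UNIT_READY;

/* no data phase, so no buffer and a zero transfer length */
return ide_queue_pc_tail(drive, disk, &pc, NULL, 0);
#endif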
352 struct ide_devset;
353 struct ide_driver;
354
355 #ifdef CONFIG_BLK_DEV_IDEACPI
356 struct ide_acpi_drive_link;
357 struct ide_acpi_hwif_link;
358 #endif
359
360 struct ide_drive_s;
361
362 struct ide_disk_ops {
363 int (*check)(struct ide_drive_s *, const char *);
364 int (*get_capacity)(struct ide_drive_s *);
365 void (*unlock_native_capacity)(struct ide_drive_s *);
366 void (*setup)(struct ide_drive_s *);
367 void (*flush)(struct ide_drive_s *);
368 int (*init_media)(struct ide_drive_s *, struct gendisk *);
369 int (*set_doorlock)(struct ide_drive_s *, struct gendisk *,
370 int);
371 ide_startstop_t (*do_request)(struct ide_drive_s *, struct request *,
372 sector_t);
373 int (*ioctl)(struct ide_drive_s *, struct block_device *,
374 fmode_t, unsigned int, unsigned long);
375 };
376
377 /* ATAPI device flags */
378 enum {
379 IDE_AFLAG_DRQ_INTERRUPT = (1 << 0),
380
381 /* ide-cd */
382 /* Drive cannot eject the disc. */
383 IDE_AFLAG_NO_EJECT = (1 << 1),
384 /* Drive is a pre ATAPI 1.2 drive. */
385 IDE_AFLAG_PRE_ATAPI12 = (1 << 2),
386 /* TOC addresses are in BCD. */
387 IDE_AFLAG_TOCADDR_AS_BCD = (1 << 3),
388 /* TOC track numbers are in BCD. */
389 IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 4),
390 /* Saved TOC information is current. */
391 IDE_AFLAG_TOC_VALID = (1 << 6),
392 /* We think that the drive door is locked. */
393 IDE_AFLAG_DOOR_LOCKED = (1 << 7),
394 /* SET_CD_SPEED command is unsupported. */
395 IDE_AFLAG_NO_SPEED_SELECT = (1 << 8),
396 IDE_AFLAG_VERTOS_300_SSD = (1 << 9),
397 IDE_AFLAG_VERTOS_600_ESD = (1 << 10),
398 IDE_AFLAG_SANYO_3CD = (1 << 11),
399 IDE_AFLAG_FULL_CAPS_PAGE = (1 << 12),
400 IDE_AFLAG_PLAY_AUDIO_OK = (1 << 13),
401 IDE_AFLAG_LE_SPEED_FIELDS = (1 << 14),
402
403 /* ide-floppy */
404 /* Avoid commands not supported in Clik drive */
405 IDE_AFLAG_CLIK_DRIVE = (1 << 15),
406 /* Requires BH algorithm for packets */
407 IDE_AFLAG_ZIP_DRIVE = (1 << 16),
408 /* Supports format progress report */
409 IDE_AFLAG_SRFP = (1 << 17),
410
411 /* ide-tape */
412 IDE_AFLAG_IGNORE_DSC = (1 << 18),
413 /* cleared (0) when the tape position is unknown */
414 IDE_AFLAG_ADDRESS_VALID = (1 << 19),
415 /* Device already opened */
416 IDE_AFLAG_BUSY = (1 << 20),
417 /* Attempt to auto-detect the current user block size */
418 IDE_AFLAG_DETECT_BS = (1 << 21),
419 /* Currently on a filemark */
420 IDE_AFLAG_FILEMARK = (1 << 22),
421 /* 0 = no tape is loaded, so we don't rewind after ejecting */
422 IDE_AFLAG_MEDIUM_PRESENT = (1 << 23),
423
424 IDE_AFLAG_NO_AUTOCLOSE = (1 << 24),
425 };
426
427 /* device flags */
428 enum {
429 /* restore settings after device reset */
430 IDE_DFLAG_KEEP_SETTINGS = (1 << 0),
431 /* device is using DMA for read/write */
432 IDE_DFLAG_USING_DMA = (1 << 1),
433 /* okay to unmask other IRQs */
434 IDE_DFLAG_UNMASK = (1 << 2),
435 /* don't attempt flushes */
436 IDE_DFLAG_NOFLUSH = (1 << 3),
437 /* DSC overlap */
438 IDE_DFLAG_DSC_OVERLAP = (1 << 4),
439 /* give potential excess bandwidth */
440 IDE_DFLAG_NICE1 = (1 << 5),
441 /* device is physically present */
442 IDE_DFLAG_PRESENT = (1 << 6),
443 /* disable Host Protected Area */
444 IDE_DFLAG_NOHPA = (1 << 7),
445 /* id read from device (synthetic if not set) */
446 IDE_DFLAG_ID_READ = (1 << 8),
447 IDE_DFLAG_NOPROBE = (1 << 9),
448 /* need to do check_media_change() */
449 IDE_DFLAG_REMOVABLE = (1 << 10),
450 /* needed for removable devices */
451 IDE_DFLAG_ATTACH = (1 << 11),
452 IDE_DFLAG_FORCED_GEOM = (1 << 12),
453 /* disallow setting unmask bit */
454 IDE_DFLAG_NO_UNMASK = (1 << 13),
455 /* disallow enabling 32-bit I/O */
456 IDE_DFLAG_NO_IO_32BIT = (1 << 14),
457 /* for removable only: door lock/unlock works */
458 IDE_DFLAG_DOORLOCKING = (1 << 15),
459 /* disallow DMA */
460 IDE_DFLAG_NODMA = (1 << 16),
461 /* powermanagement told us not to do anything, so sleep nicely */
462 IDE_DFLAG_BLOCKED = (1 << 17),
463 /* sleeping & sleep field valid */
464 IDE_DFLAG_SLEEPING = (1 << 18),
465 IDE_DFLAG_POST_RESET = (1 << 19),
466 IDE_DFLAG_UDMA33_WARNED = (1 << 20),
467 IDE_DFLAG_LBA48 = (1 << 21),
468 /* status of write cache */
469 IDE_DFLAG_WCACHE = (1 << 22),
470 /* used for ignoring ATA_DF */
471 IDE_DFLAG_NOWERR = (1 << 23),
472 /* retrying in PIO */
473 IDE_DFLAG_DMA_PIO_RETRY = (1 << 24),
474 IDE_DFLAG_LBA = (1 << 25),
475 /* don't unload heads */
476 IDE_DFLAG_NO_UNLOAD = (1 << 26),
477 /* heads unloaded, please don't reset port */
478 IDE_DFLAG_PARKED = (1 << 27),
479 IDE_DFLAG_MEDIA_CHANGED = (1 << 28),
480 /* write protect */
481 IDE_DFLAG_WP = (1 << 29),
482 IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 30),
483 IDE_DFLAG_NIEN_QUIRK = (1 << 31),
484 };
485
486 struct ide_drive_s {
487 char name[4]; /* drive name, such as "hda" */
488 char driver_req[10]; /* requests specific driver */
489
490 struct request_queue *queue; /* request queue */
491
492 struct request *rq; /* current request */
493 void *driver_data; /* extra driver data */
494 u16 *id; /* identification info */
495 #ifdef CONFIG_IDE_PROC_FS
496 struct proc_dir_entry *proc; /* /proc/ide/ directory entry */
497 const struct ide_proc_devset *settings; /* /proc/ide/ drive settings */
498 #endif
499 struct hwif_s *hwif; /* actually (ide_hwif_t *) */
500
501 const struct ide_disk_ops *disk_ops;
502
503 unsigned long dev_flags;
504
505 unsigned long sleep; /* sleep until this time */
506 unsigned long timeout; /* max time to wait for irq */
507
508 u8 special_flags; /* special action flags */
509
510 u8 select; /* basic drive/head select reg value */
511 u8 retry_pio; /* retrying dma capable host in pio */
512 u8 waiting_for_dma; /* dma currently in progress */
513 u8 dma; /* atapi dma flag */
514
515 u8 init_speed; /* transfer rate set at boot */
516 u8 current_speed; /* current transfer rate set */
517 u8 desired_speed; /* desired transfer rate set */
518 u8 pio_mode; /* for ->set_pio_mode _only_ */
519 u8 dma_mode; /* for ->set_dma_mode _only_ */
520 u8 dn; /* device number; now in widespread use */
521 u8 acoustic; /* acoustic management */
522 u8 media; /* disk, cdrom, tape, floppy, ... */
523 u8 ready_stat; /* min status value for drive ready */
524 u8 mult_count; /* current multiple sector setting */
525 u8 mult_req; /* requested multiple sector setting */
526 u8 io_32bit; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */
527 u8 bad_wstat; /* used for ignoring ATA_DF */
528 u8 head; /* "real" number of heads */
529 u8 sect; /* "real" sectors per track */
530 u8 bios_head; /* BIOS/fdisk/LILO number of heads */
531 u8 bios_sect; /* BIOS/fdisk/LILO sectors per track */
532
533 /* delay this long before sending packet command */
534 u8 pc_delay;
535
536 unsigned int bios_cyl; /* BIOS/fdisk/LILO number of cyls */
537 unsigned int cyl; /* "real" number of cyls */
538 void *drive_data; /* used by set_pio_mode/dev_select() */
539 unsigned int failures; /* current failure count */
540 unsigned int max_failures; /* maximum allowed failure count */
541 u64 probed_capacity;/* initial/native media capacity */
542 u64 capacity64; /* total number of sectors */
543
544 int lun; /* logical unit */
545 int crc_count; /* crc counter to reduce drive speed */
546
547 unsigned long debug_mask; /* debugging levels switch */
548
549 #ifdef CONFIG_BLK_DEV_IDEACPI
550 struct ide_acpi_drive_link *acpidata;
551 #endif
552 struct list_head list;
553 struct device gendev;
554 struct completion gendev_rel_comp; /* to deal with device release() */
555
556 /* current packet command */
557 struct ide_atapi_pc *pc;
558
559 /* last failed packet command */
560 struct ide_atapi_pc *failed_pc;
561
562 /* callback for packet commands */
563 int (*pc_callback)(struct ide_drive_s *, int);
564
565 ide_startstop_t (*irq_handler)(struct ide_drive_s *);
566
567 unsigned long atapi_flags;
568
569 struct ide_atapi_pc request_sense_pc;
570
571 /* current sense rq and buffer */
572 bool sense_rq_armed;
573 struct request sense_rq;
574 struct request_sense sense_data;
575 };
576
577 typedef struct ide_drive_s ide_drive_t;
578
579 #define to_ide_device(dev) container_of(dev, ide_drive_t, gendev)
580
581 #define to_ide_drv(obj, cont_type) \
582 container_of(obj, struct cont_type, dev)
583
584 #define ide_drv_g(disk, cont_type) \
585 container_of((disk)->private_data, struct cont_type, driver)
586
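/*
 * Editor's illustrative sketch (not part of the original header): subdrivers
 * embed a struct device and a pointer to their struct ide_driver in the
 * per-device object so that to_ide_drv() and ide_drv_g() can recover it.
 * The structure below is a simplified stand-in for what the disk/floppy/tape
 * drivers declare; gendisk->private_data is set to &obj->driver.
 */
#if 0
struct ide_disk_obj {
	ide_drive_t		*drive;
	struct ide_driver	*driver;
	struct gendisk		*disk;
	struct device		dev;
};

struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);	/* from a gendisk */
struct ide_disk_obj *obj  = to_ide_drv(dev, ide_disk_obj);	/* from its struct device */
#endif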
587 struct ide_port_info;
588
589 struct ide_tp_ops {
590 void (*exec_command)(struct hwif_s *, u8);
591 u8 (*read_status)(struct hwif_s *);
592 u8 (*read_altstatus)(struct hwif_s *);
593 void (*write_devctl)(struct hwif_s *, u8);
594
595 void (*dev_select)(ide_drive_t *);
596 void (*tf_load)(ide_drive_t *, struct ide_taskfile *, u8);
597 void (*tf_read)(ide_drive_t *, struct ide_taskfile *, u8);
598
599 void (*input_data)(ide_drive_t *, struct ide_cmd *,
600 void *, unsigned int);
601 void (*output_data)(ide_drive_t *, struct ide_cmd *,
602 void *, unsigned int);
603 };
604
605 extern const struct ide_tp_ops default_tp_ops;
606
607 /**
608 * struct ide_port_ops - IDE port operations
609 *
610 * @init_dev: host specific initialization of a device
611 * @set_pio_mode: routine to program host for PIO mode
612 * @set_dma_mode: routine to program host for DMA mode
613 * @reset_poll: chipset polling based on hba specifics
614 * @pre_reset: chipset specific changes to default for device-hba resets
615 * @resetproc: routine to reset controller after a disk reset
616 * @maskproc: special host masking for drive selection
617 * @quirkproc: check host's drive quirk list
618 * @clear_irq: clear IRQ
619 *
620 * @mdma_filter: filter MDMA modes
621 * @udma_filter: filter UDMA modes
622 *
623 * @cable_detect: detect cable type
624 */
625 struct ide_port_ops {
626 void (*init_dev)(ide_drive_t *);
627 void (*set_pio_mode)(struct hwif_s *, ide_drive_t *);
628 void (*set_dma_mode)(struct hwif_s *, ide_drive_t *);
629 int (*reset_poll)(ide_drive_t *);
630 void (*pre_reset)(ide_drive_t *);
631 void (*resetproc)(ide_drive_t *);
632 void (*maskproc)(ide_drive_t *, int);
633 void (*quirkproc)(ide_drive_t *);
634 void (*clear_irq)(ide_drive_t *);
635 int (*test_irq)(struct hwif_s *);
636
637 u8 (*mdma_filter)(ide_drive_t *);
638 u8 (*udma_filter)(ide_drive_t *);
639
640 u8 (*cable_detect)(struct hwif_s *);
641 };
642
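/*
 * Editor's illustrative sketch (not part of the original header): a chipset
 * driver fills in only the hooks it needs; the "my_*" names are placeholders
 * for the driver's own routines.
 */
#if 0
static const struct ide_port_ops my_port_ops = {
	.set_pio_mode	= my_set_pio_mode,	/* program PIO timings into the chipset */
	.set_dma_mode	= my_set_dma_mode,	/* program MWDMA/UDMA timings */
	.cable_detect	= my_cable_detect,	/* returns e.g. ATA_CBL_PATA40/PATA80 */
};
#endif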
643 struct ide_dma_ops {
644 void (*dma_host_set)(struct ide_drive_s *, int);
645 int (*dma_setup)(struct ide_drive_s *, struct ide_cmd *);
646 void (*dma_start)(struct ide_drive_s *);
647 int (*dma_end)(struct ide_drive_s *);
648 int (*dma_test_irq)(struct ide_drive_s *);
649 void (*dma_lost_irq)(struct ide_drive_s *);
650 /* below ones are optional */
651 int (*dma_check)(struct ide_drive_s *, struct ide_cmd *);
652 int (*dma_timer_expiry)(struct ide_drive_s *);
653 void (*dma_clear)(struct ide_drive_s *);
654 /*
655 * The following method is optional and only required to be
656 * implemented for the SFF-8038i compatible controllers.
657 */
658 u8 (*dma_sff_read_status)(struct hwif_s *);
659 };
660
661 enum {
662 IDE_PFLAG_PROBING = (1 << 0),
663 };
664
665 struct ide_host;
666
667 typedef struct hwif_s {
668 struct hwif_s *mate; /* other hwif from same PCI chip */
669 struct proc_dir_entry *proc; /* /proc/ide/ directory entry */
670
671 struct ide_host *host;
672
673 char name[6]; /* name of interface, eg. "ide0" */
674
675 struct ide_io_ports io_ports;
676
677 unsigned long sata_scr[SATA_NR_PORTS];
678
679 ide_drive_t *devices[MAX_DRIVES + 1];
680
681 unsigned long port_flags;
682
683 u8 major; /* our major number */
684 u8 index; /* 0 for ide0; 1 for ide1; ... */
685 u8 channel; /* for dual-port chips: 0=primary, 1=secondary */
686
687 u32 host_flags;
688
689 u8 pio_mask;
690
691 u8 ultra_mask;
692 u8 mwdma_mask;
693 u8 swdma_mask;
694
695 u8 cbl; /* cable type */
696
697 hwif_chipset_t chipset; /* sub-module for tuning.. */
698
699 struct device *dev;
700
701 void (*rw_disk)(ide_drive_t *, struct request *);
702
703 const struct ide_tp_ops *tp_ops;
704 const struct ide_port_ops *port_ops;
705 const struct ide_dma_ops *dma_ops;
706
707 /* dma physical region descriptor table (cpu view) */
708 unsigned int *dmatable_cpu;
709 /* dma physical region descriptor table (dma view) */
710 dma_addr_t dmatable_dma;
711
712 /* maximum number of PRD table entries */
713 int prd_max_nents;
714 /* PRD entry size in bytes */
715 int prd_ent_size;
716
717 /* Scatter-gather list used to build the above */
718 struct scatterlist *sg_table;
719 int sg_max_nents; /* Maximum number of entries in it */
720
721 struct ide_cmd cmd; /* current command */
722
723 int rqsize; /* max sectors per request */
724 int irq; /* our irq number */
725
726 unsigned long dma_base; /* base addr for dma ports */
727
728 unsigned long config_data; /* for use by chipset-specific code */
729 unsigned long select_data; /* for use by chipset-specific code */
730
731 unsigned long extra_base; /* extra addr for dma ports */
732 unsigned extra_ports; /* number of extra dma ports */
733
734 unsigned present : 1; /* this interface exists */
735 unsigned busy : 1; /* serializes devices on a port */
736
737 struct device gendev;
738 struct device *portdev;
739
740 struct completion gendev_rel_comp; /* To deal with device release() */
741
742 void *hwif_data; /* extra hwif data */
743
744 #ifdef CONFIG_BLK_DEV_IDEACPI
745 struct ide_acpi_hwif_link *acpidata;
746 #endif
747
748 /* IRQ handler, if active */
749 ide_startstop_t (*handler)(ide_drive_t *);
750
751 /* BOOL: polling active & poll_timeout field valid */
752 unsigned int polling : 1;
753
754 /* current drive */
755 ide_drive_t *cur_dev;
756
757 /* current request */
758 struct request *rq;
759
760 /* failsafe timer */
761 struct timer_list timer;
762 /* timeout value during long polls */
763 unsigned long poll_timeout;
764 /* queried upon timeouts */
765 int (*expiry)(ide_drive_t *);
766
767 int req_gen;
768 int req_gen_timer;
769
770 spinlock_t lock;
771 } ____cacheline_internodealigned_in_smp ide_hwif_t;
772
773 #define MAX_HOST_PORTS 4
774
775 struct ide_host {
776 ide_hwif_t *ports[MAX_HOST_PORTS + 1];
777 unsigned int n_ports;
778 struct device *dev[2];
779
780 int (*init_chipset)(struct pci_dev *);
781
782 void (*get_lock)(irq_handler_t, void *);
783 void (*release_lock)(void);
784
785 irq_handler_t irq_handler;
786
787 unsigned long host_flags;
788
789 int irq_flags;
790
791 void *host_priv;
792 ide_hwif_t *cur_port; /* for hosts requiring serialization */
793
794 /* used for hosts requiring serialization */
795 volatile unsigned long host_busy;
796 };
797
798 #define IDE_HOST_BUSY 0
799
800 /*
801 * internal ide interrupt handler type
802 */
803 typedef ide_startstop_t (ide_handler_t)(ide_drive_t *);
804 typedef int (ide_expiry_t)(ide_drive_t *);
805
806 /* used by ide-cd, ide-floppy, etc. */
807 typedef void (xfer_func_t)(ide_drive_t *, struct ide_cmd *, void *, unsigned);
808
809 extern struct mutex ide_setting_mtx;
810
811 /*
812 * configurable drive settings
813 */
814
815 #define DS_SYNC (1 << 0)
816
817 struct ide_devset {
818 int (*get)(ide_drive_t *);
819 int (*set)(ide_drive_t *, int);
820 unsigned int flags;
821 };
822
823 #define __DEVSET(_flags, _get, _set) { \
824 .flags = _flags, \
825 .get = _get, \
826 .set = _set, \
827 }
828
829 #define ide_devset_get(name, field) \
830 static int get_##name(ide_drive_t *drive) \
831 { \
832 return drive->field; \
833 }
834
835 #define ide_devset_set(name, field) \
836 static int set_##name(ide_drive_t *drive, int arg) \
837 { \
838 drive->field = arg; \
839 return 0; \
840 }
841
842 #define ide_devset_get_flag(name, flag) \
843 static int get_##name(ide_drive_t *drive) \
844 { \
845 return !!(drive->dev_flags & flag); \
846 }
847
848 #define ide_devset_set_flag(name, flag) \
849 static int set_##name(ide_drive_t *drive, int arg) \
850 { \
851 if (arg) \
852 drive->dev_flags |= flag; \
853 else \
854 drive->dev_flags &= ~flag; \
855 return 0; \
856 }
857
858 #define __IDE_DEVSET(_name, _flags, _get, _set) \
859 const struct ide_devset ide_devset_##_name = \
860 __DEVSET(_flags, _get, _set)
861
862 #define IDE_DEVSET(_name, _flags, _get, _set) \
863 static __IDE_DEVSET(_name, _flags, _get, _set)
864
865 #define ide_devset_rw(_name, _func) \
866 IDE_DEVSET(_name, 0, get_##_func, set_##_func)
867
868 #define ide_devset_w(_name, _func) \
869 IDE_DEVSET(_name, 0, NULL, set_##_func)
870
871 #define ide_ext_devset_rw(_name, _func) \
872 __IDE_DEVSET(_name, 0, get_##_func, set_##_func)
873
874 #define ide_ext_devset_rw_sync(_name, _func) \
875 __IDE_DEVSET(_name, DS_SYNC, get_##_func, set_##_func)
876
877 #define ide_decl_devset(_name) \
878 extern const struct ide_devset ide_devset_##_name
879
880 ide_decl_devset(io_32bit);
881 ide_decl_devset(keepsettings);
882 ide_decl_devset(pio_mode);
883 ide_decl_devset(unmaskirq);
884 ide_decl_devset(using_dma);
885
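/*
 * Editor's illustrative sketch (not part of the original header): for a
 * simple field the accessor macros above expand to trivial get/set helpers
 * plus a struct ide_devset instance.  Taking the "acoustic" drive field as
 * an example (real drivers often use a custom set routine instead):
 */
#if 0
ide_devset_get(acoustic, acoustic);
ide_devset_set(acoustic, acoustic);

IDE_DEVSET(acoustic, DS_SYNC, get_acoustic, set_acoustic);
#endif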
886 #ifdef CONFIG_IDE_PROC_FS
887 /*
888 * /proc/ide interface
889 */
890
891 #define ide_devset_rw_field(_name, _field) \
892 ide_devset_get(_name, _field); \
893 ide_devset_set(_name, _field); \
894 IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
895
896 #define ide_devset_rw_flag(_name, _field) \
897 ide_devset_get_flag(_name, _field); \
898 ide_devset_set_flag(_name, _field); \
899 IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
900
901 struct ide_proc_devset {
902 const char *name;
903 const struct ide_devset *setting;
904 int min, max;
905 int (*mulf)(ide_drive_t *);
906 int (*divf)(ide_drive_t *);
907 };
908
909 #define __IDE_PROC_DEVSET(_name, _min, _max, _mulf, _divf) { \
910 .name = __stringify(_name), \
911 .setting = &ide_devset_##_name, \
912 .min = _min, \
913 .max = _max, \
914 .mulf = _mulf, \
915 .divf = _divf, \
916 }
917
918 #define IDE_PROC_DEVSET(_name, _min, _max) \
919 __IDE_PROC_DEVSET(_name, _min, _max, NULL, NULL)
920
921 typedef struct {
922 const char *name;
923 umode_t mode;
924 const struct file_operations *proc_fops;
925 } ide_proc_entry_t;
926
927 void proc_ide_create(void);
928 void proc_ide_destroy(void);
929 void ide_proc_register_port(ide_hwif_t *);
930 void ide_proc_port_register_devices(ide_hwif_t *);
931 void ide_proc_unregister_device(ide_drive_t *);
932 void ide_proc_unregister_port(ide_hwif_t *);
933 void ide_proc_register_driver(ide_drive_t *, struct ide_driver *);
934 void ide_proc_unregister_driver(ide_drive_t *, struct ide_driver *);
935
936 extern const struct file_operations ide_capacity_proc_fops;
937 extern const struct file_operations ide_geometry_proc_fops;
938 #else
939 static inline void proc_ide_create(void) { ; }
940 static inline void proc_ide_destroy(void) { ; }
941 static inline void ide_proc_register_port(ide_hwif_t *hwif) { ; }
942 static inline void ide_proc_port_register_devices(ide_hwif_t *hwif) { ; }
943 static inline void ide_proc_unregister_device(ide_drive_t *drive) { ; }
944 static inline void ide_proc_unregister_port(ide_hwif_t *hwif) { ; }
945 static inline void ide_proc_register_driver(ide_drive_t *drive,
946 struct ide_driver *driver) { ; }
947 static inline void ide_proc_unregister_driver(ide_drive_t *drive,
948 struct ide_driver *driver) { ; }
949 #endif
950
951 enum {
952 /* enter/exit functions */
953 IDE_DBG_FUNC = (1 << 0),
954 /* sense key/asc handling */
955 IDE_DBG_SENSE = (1 << 1),
956 /* packet commands handling */
957 IDE_DBG_PC = (1 << 2),
958 /* request handling */
959 IDE_DBG_RQ = (1 << 3),
960 /* driver probing/setup */
961 IDE_DBG_PROBE = (1 << 4),
962 };
963
964 /* DRV_NAME has to be defined in the driver before using the macro below */
965 #define __ide_debug_log(lvl, fmt, args...) \
966 { \
967 if (unlikely(drive->debug_mask & lvl)) \
968 printk(KERN_INFO DRV_NAME ": %s: " fmt "\n", \
969 __func__, ## args); \
970 }
971
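/*
 * Editor's illustrative sketch (not part of the original header): a driver
 * defines DRV_NAME and usually a thin wrapper; note that the expansion
 * references a local variable named "drive", which must be in scope at the
 * call site.  Modelled on the CD/floppy drivers:
 */
#if 0
#define DRV_NAME "ide-cd"

#define ide_debug_log(lvl, fmt, args...) __ide_debug_log(lvl, fmt, ## args)

	/* inside a function that has ide_drive_t *drive in scope: */
	ide_debug_log(IDE_DBG_FUNC, "enter");
#endif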
972 /*
973 * Power Management state machine (rq->pm->pm_step).
974 *
975 * For each step, the core calls ide_start_power_step() first.
976 * This can return:
977 * - ide_stopped : In this case, the core calls us back again unless
978 * the step has been set to ide_power_state_completed.
979 * - ide_started : In this case, the channel is left busy until an
980 * async event (interrupt) occurs.
981 * Typically, ide_start_power_step() will issue a taskfile request with
982 * do_rw_taskfile().
983 *
984 * Upon reception of the interrupt, the core will call ide_complete_power_step()
985 * with the error code if any. This routine should update the step value
986 * and return. It should not start a new request. The core will call
987 * ide_start_power_step() for the new step value, unless the step has
988 * been set to IDE_PM_COMPLETED.
989 */
990 enum {
991 IDE_PM_START_SUSPEND,
992 IDE_PM_FLUSH_CACHE = IDE_PM_START_SUSPEND,
993 IDE_PM_STANDBY,
994
995 IDE_PM_START_RESUME,
996 IDE_PM_RESTORE_PIO = IDE_PM_START_RESUME,
997 IDE_PM_IDLE,
998 IDE_PM_RESTORE_DMA,
999
1000 IDE_PM_COMPLETED,
1001 };
1002
1003 int generic_ide_suspend(struct device *, pm_message_t);
1004 int generic_ide_resume(struct device *);
1005
1006 void ide_complete_power_step(ide_drive_t *, struct request *);
1007 ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *);
1008 void ide_complete_pm_rq(ide_drive_t *, struct request *);
1009 void ide_check_pm_state(ide_drive_t *, struct request *);
1010
1011 /*
1012 * Subdrivers support.
1013 *
1014 * The gen_driver.owner field should be set to the module owner of this driver.
1015 * The gen_driver.name field should be set to the name of this driver.
1016 */
1017 struct ide_driver {
1018 const char *version;
1019 ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t);
1020 struct device_driver gen_driver;
1021 int (*probe)(ide_drive_t *);
1022 void (*remove)(ide_drive_t *);
1023 void (*resume)(ide_drive_t *);
1024 void (*shutdown)(ide_drive_t *);
1025 #ifdef CONFIG_IDE_PROC_FS
1026 ide_proc_entry_t * (*proc_entries)(ide_drive_t *);
1027 const struct ide_proc_devset * (*proc_devsets)(ide_drive_t *);
1028 #endif
1029 };
1030
1031 #define to_ide_driver(drv) container_of(drv, struct ide_driver, gen_driver)
1032
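/*
 * Editor's illustrative sketch (not part of the original header): a
 * subdriver instance, modelled on the disk/cd drivers; the "my_*" names are
 * placeholders.  It is registered with the driver core through the embedded
 * device_driver:
 */
#if 0
static struct ide_driver my_driver = {
	.version	= "1.0",
	.probe		= my_probe,
	.remove		= my_remove,
	.do_request	= my_do_request,
	.gen_driver = {
		.owner	= THIS_MODULE,
		.name	= "my-ide-subdriver",
		.bus	= &ide_bus_type,	/* declared further down */
	},
};

return driver_register(&my_driver.gen_driver);
#endif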
1033 int ide_device_get(ide_drive_t *);
1034 void ide_device_put(ide_drive_t *);
1035
1036 struct ide_ioctl_devset {
1037 unsigned int get_ioctl;
1038 unsigned int set_ioctl;
1039 const struct ide_devset *setting;
1040 };
1041
1042 int ide_setting_ioctl(ide_drive_t *, struct block_device *, unsigned int,
1043 unsigned long, const struct ide_ioctl_devset *);
1044
1045 int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned long);
1046
1047 extern int ide_vlb_clk;
1048 extern int ide_pci_clk;
1049
1050 int ide_end_rq(ide_drive_t *, struct request *, int, unsigned int);
1051 void ide_kill_rq(ide_drive_t *, struct request *);
1052
1053 void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
1054 void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
1055
1056 void ide_execute_command(ide_drive_t *, struct ide_cmd *, ide_handler_t *,
1057 unsigned int);
1058
1059 void ide_pad_transfer(ide_drive_t *, int, int);
1060
1061 ide_startstop_t ide_error(ide_drive_t *, const char *, u8);
1062
1063 void ide_fix_driveid(u16 *);
1064
1065 extern void ide_fixstring(u8 *, const int, const int);
1066
1067 int ide_busy_sleep(ide_drive_t *, unsigned long, int);
1068
1069 int __ide_wait_stat(ide_drive_t *, u8, u8, unsigned long, u8 *);
1070 int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long);
1071
1072 ide_startstop_t ide_do_park_unpark(ide_drive_t *, struct request *);
1073 ide_startstop_t ide_do_devset(ide_drive_t *, struct request *);
1074
1075 extern ide_startstop_t ide_do_reset (ide_drive_t *);
1076
1077 extern int ide_devset_execute(ide_drive_t *drive,
1078 const struct ide_devset *setting, int arg);
1079
1080 void ide_complete_cmd(ide_drive_t *, struct ide_cmd *, u8, u8);
1081 int ide_complete_rq(ide_drive_t *, int, unsigned int);
1082
1083 void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd);
1084 void ide_tf_dump(const char *, struct ide_cmd *);
1085
1086 void ide_exec_command(ide_hwif_t *, u8);
1087 u8 ide_read_status(ide_hwif_t *);
1088 u8 ide_read_altstatus(ide_hwif_t *);
1089 void ide_write_devctl(ide_hwif_t *, u8);
1090
1091 void ide_dev_select(ide_drive_t *);
1092 void ide_tf_load(ide_drive_t *, struct ide_taskfile *, u8);
1093 void ide_tf_read(ide_drive_t *, struct ide_taskfile *, u8);
1094
1095 void ide_input_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
1096 void ide_output_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
1097
1098 void SELECT_MASK(ide_drive_t *, int);
1099
1100 u8 ide_read_error(ide_drive_t *);
1101 void ide_read_bcount_and_ireason(ide_drive_t *, u16 *, u8 *);
1102
1103 int ide_check_ireason(ide_drive_t *, struct request *, int, int, int);
1104
1105 int ide_check_atapi_device(ide_drive_t *, const char *);
1106
1107 void ide_init_pc(struct ide_atapi_pc *);
1108
1109 /* Disk head parking */
1110 extern wait_queue_head_t ide_park_wq;
1111 ssize_t ide_park_show(struct device *dev, struct device_attribute *attr,
1112 char *buf);
1113 ssize_t ide_park_store(struct device *dev, struct device_attribute *attr,
1114 const char *buf, size_t len);
1115
1116 /*
1117 * Special requests for ide-tape block device strategy routine.
1118 *
1119 * In order to service a character device command, we add special requests to
1120 * the tail of our block device request queue and wait for their completion.
1121 */
1122 enum {
1123 REQ_IDETAPE_PC1 = (1 << 0), /* packet command (first stage) */
1124 REQ_IDETAPE_PC2 = (1 << 1), /* packet command (second stage) */
1125 REQ_IDETAPE_READ = (1 << 2),
1126 REQ_IDETAPE_WRITE = (1 << 3),
1127 };
1128
1129 int ide_queue_pc_tail(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *,
1130 void *, unsigned int);
1131
1132 int ide_do_test_unit_ready(ide_drive_t *, struct gendisk *);
1133 int ide_do_start_stop(ide_drive_t *, struct gendisk *, int);
1134 int ide_set_media_lock(ide_drive_t *, struct gendisk *, int);
1135 void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *);
1136 void ide_retry_pc(ide_drive_t *drive);
1137
1138 void ide_prep_sense(ide_drive_t *drive, struct request *rq);
1139 int ide_queue_sense_rq(ide_drive_t *drive, void *special);
1140
1141 int ide_cd_expiry(ide_drive_t *);
1142
1143 int ide_cd_get_xferlen(struct request *);
1144
1145 ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_cmd *);
1146
1147 ide_startstop_t do_rw_taskfile(ide_drive_t *, struct ide_cmd *);
1148
1149 void ide_pio_bytes(ide_drive_t *, struct ide_cmd *, unsigned int, unsigned int);
1150
1151 void ide_finish_cmd(ide_drive_t *, struct ide_cmd *, u8);
1152
1153 int ide_raw_taskfile(ide_drive_t *, struct ide_cmd *, u8 *, u16);
1154 int ide_no_data_taskfile(ide_drive_t *, struct ide_cmd *);
1155
1156 int ide_taskfile_ioctl(ide_drive_t *, unsigned long);
1157
1158 int ide_dev_read_id(ide_drive_t *, u8, u16 *, int);
1159
1160 extern int ide_driveid_update(ide_drive_t *);
1161 extern int ide_config_drive_speed(ide_drive_t *, u8);
1162 extern u8 eighty_ninty_three (ide_drive_t *);
1163 extern int taskfile_lib_get_identify(ide_drive_t *drive, u8 *);
1164
1165 extern int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout);
1166
1167 extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
1168
1169 extern void ide_timer_expiry(unsigned long);
1170 extern irqreturn_t ide_intr(int irq, void *dev_id);
1171 extern void do_ide_request(struct request_queue *);
1172 extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
1173
1174 void ide_init_disk(struct gendisk *, ide_drive_t *);
1175
1176 #ifdef CONFIG_IDEPCI_PCIBUS_ORDER
1177 extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *owner, const char *mod_name);
1178 #define ide_pci_register_driver(d) __ide_pci_register_driver(d, THIS_MODULE, KBUILD_MODNAME)
1179 #else
1180 #define ide_pci_register_driver(d) pci_register_driver(d)
1181 #endif
1182
1183 static inline int ide_pci_is_in_compatibility_mode(struct pci_dev *dev)
1184 {
1185 if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && (dev->class & 5) != 5)
1186 return 1;
1187 return 0;
1188 }
1189
1190 void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *,
1191 struct ide_hw *, struct ide_hw **);
1192 void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);
1193
1194 #ifdef CONFIG_BLK_DEV_IDEDMA_PCI
1195 int ide_pci_set_master(struct pci_dev *, const char *);
1196 unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *);
1197 int ide_pci_check_simplex(ide_hwif_t *, const struct ide_port_info *);
1198 int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *);
1199 #else
1200 static inline int ide_hwif_setup_dma(ide_hwif_t *hwif,
1201 const struct ide_port_info *d)
1202 {
1203 return -EINVAL;
1204 }
1205 #endif
1206
1207 struct ide_pci_enablebit {
1208 u8 reg; /* byte pci reg holding the enable-bit */
1209 u8 mask; /* mask to isolate the enable-bit */
1210 u8 val; /* value of masked reg when "enabled" */
1211 };
1212
1213 enum {
1214 /* Uses ISA control ports not PCI ones. */
1215 IDE_HFLAG_ISA_PORTS = (1 << 0),
1216 /* single port device */
1217 IDE_HFLAG_SINGLE = (1 << 1),
1218 /* don't use legacy PIO blacklist */
1219 IDE_HFLAG_PIO_NO_BLACKLIST = (1 << 2),
1220 /* set for the second port of QD65xx */
1221 IDE_HFLAG_QD_2ND_PORT = (1 << 3),
1222 /* use PIO8/9 for prefetch off/on */
1223 IDE_HFLAG_ABUSE_PREFETCH = (1 << 4),
1224 /* use PIO6/7 for fast-devsel off/on */
1225 IDE_HFLAG_ABUSE_FAST_DEVSEL = (1 << 5),
1226 /* use 100-102 and 200-202 PIO values to set DMA modes */
1227 IDE_HFLAG_ABUSE_DMA_MODES = (1 << 6),
1228 /*
1229 * keep DMA setting when programming PIO mode, may be used only
1230 * for hosts which have separate PIO and DMA timings (ie. PMAC)
1231 */
1232 IDE_HFLAG_SET_PIO_MODE_KEEP_DMA = (1 << 7),
1233 /* program host for the transfer mode after programming device */
1234 IDE_HFLAG_POST_SET_MODE = (1 << 8),
1235 /* don't program host/device for the transfer mode ("smart" hosts) */
1236 IDE_HFLAG_NO_SET_MODE = (1 << 9),
1237 /* trust BIOS for programming chipset/device for DMA */
1238 IDE_HFLAG_TRUST_BIOS_FOR_DMA = (1 << 10),
1239 /* host is CS5510/CS5520 */
1240 IDE_HFLAG_CS5520 = (1 << 11),
1241 /* ATAPI DMA is unsupported */
1242 IDE_HFLAG_NO_ATAPI_DMA = (1 << 12),
1243 /* set if host is a "non-bootable" controller */
1244 IDE_HFLAG_NON_BOOTABLE = (1 << 13),
1245 /* host doesn't support DMA */
1246 IDE_HFLAG_NO_DMA = (1 << 14),
1247 /* check if host is PCI IDE device before allowing DMA */
1248 IDE_HFLAG_NO_AUTODMA = (1 << 15),
1249 /* host uses MMIO */
1250 IDE_HFLAG_MMIO = (1 << 16),
1251 /* no LBA48 */
1252 IDE_HFLAG_NO_LBA48 = (1 << 17),
1253 /* no LBA48 DMA */
1254 IDE_HFLAG_NO_LBA48_DMA = (1 << 18),
1255 /* data FIFO is cleared by an error */
1256 IDE_HFLAG_ERROR_STOPS_FIFO = (1 << 19),
1257 /* serialize ports */
1258 IDE_HFLAG_SERIALIZE = (1 << 20),
1259 /* host is DTC2278 */
1260 IDE_HFLAG_DTC2278 = (1 << 21),
1261 /* 4 devices on a single set of I/O ports */
1262 IDE_HFLAG_4DRIVES = (1 << 22),
1263 /* host is TRM290 */
1264 IDE_HFLAG_TRM290 = (1 << 23),
1265 /* use 32-bit I/O ops */
1266 IDE_HFLAG_IO_32BIT = (1 << 24),
1267 /* unmask IRQs */
1268 IDE_HFLAG_UNMASK_IRQS = (1 << 25),
1269 IDE_HFLAG_BROKEN_ALTSTATUS = (1 << 26),
1270 /* serialize ports if DMA is possible (for sl82c105) */
1271 IDE_HFLAG_SERIALIZE_DMA = (1 << 27),
1272 /* force host out of "simplex" mode */
1273 IDE_HFLAG_CLEAR_SIMPLEX = (1 << 28),
1274 /* DSC overlap is unsupported */
1275 IDE_HFLAG_NO_DSC = (1 << 29),
1276 /* never use 32-bit I/O ops */
1277 IDE_HFLAG_NO_IO_32BIT = (1 << 30),
1278 /* never unmask IRQs */
1279 IDE_HFLAG_NO_UNMASK_IRQS = (1 << 31),
1280 };
1281
1282 #ifdef CONFIG_BLK_DEV_OFFBOARD
1283 # define IDE_HFLAG_OFF_BOARD 0
1284 #else
1285 # define IDE_HFLAG_OFF_BOARD IDE_HFLAG_NON_BOOTABLE
1286 #endif
1287
1288 struct ide_port_info {
1289 char *name;
1290
1291 int (*init_chipset)(struct pci_dev *);
1292
1293 void (*get_lock)(irq_handler_t, void *);
1294 void (*release_lock)(void);
1295
1296 void (*init_iops)(ide_hwif_t *);
1297 void (*init_hwif)(ide_hwif_t *);
1298 int (*init_dma)(ide_hwif_t *,
1299 const struct ide_port_info *);
1300
1301 const struct ide_tp_ops *tp_ops;
1302 const struct ide_port_ops *port_ops;
1303 const struct ide_dma_ops *dma_ops;
1304
1305 struct ide_pci_enablebit enablebits[2];
1306
1307 hwif_chipset_t chipset;
1308
1309 u16 max_sectors; /* if < than the default one */
1310
1311 u32 host_flags;
1312
1313 int irq_flags;
1314
1315 u8 pio_mask;
1316 u8 swdma_mask;
1317 u8 mwdma_mask;
1318 u8 udma_mask;
1319 };
1320
1321 int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *);
1322 int ide_pci_init_two(struct pci_dev *, struct pci_dev *,
1323 const struct ide_port_info *, void *);
1324 void ide_pci_remove(struct pci_dev *);
1325
1326 #ifdef CONFIG_PM
1327 int ide_pci_suspend(struct pci_dev *, pm_message_t);
1328 int ide_pci_resume(struct pci_dev *);
1329 #else
1330 #define ide_pci_suspend NULL
1331 #define ide_pci_resume NULL
1332 #endif
1333
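/*
 * Editor's illustrative sketch (not part of the original header): the usual
 * shape of a PCI host driver built on these helpers.  All "my_*" names and
 * the mode masks are placeholders; real drivers pick the masks their chipset
 * supports (ATA_PIO4 etc. come from <linux/ata.h>).
 */
#if 0
static const struct ide_port_info my_chipset_port_info = {
	.name		= "MYIDE",
	.port_ops	= &my_port_ops,
	.host_flags	= IDE_HFLAG_SINGLE,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA5,
};

static int my_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	return ide_pci_init_one(dev, &my_chipset_port_info, NULL);
}

static struct pci_driver my_pci_driver = {
	.name		= "MYIDE_IDE",
	.id_table	= my_pci_tbl,
	.probe		= my_init_one,
	.remove		= ide_pci_remove,
	.suspend	= ide_pci_suspend,	/* NULL when !CONFIG_PM */
	.resume		= ide_pci_resume,
};

static int __init my_ide_init(void)
{
	return ide_pci_register_driver(&my_pci_driver);
}
module_init(my_ide_init);
#endif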
1334 void ide_map_sg(ide_drive_t *, struct ide_cmd *);
1335 void ide_init_sg_cmd(struct ide_cmd *, unsigned int);
1336
1337 #define BAD_DMA_DRIVE 0
1338 #define GOOD_DMA_DRIVE 1
1339
1340 struct drive_list_entry {
1341 const char *id_model;
1342 const char *id_firmware;
1343 };
1344
1345 int ide_in_drive_list(u16 *, const struct drive_list_entry *);
1346
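/*
 * Editor's illustrative sketch (not part of the original header): quirk
 * tables match on the IDENTIFY model string and, optionally, the firmware
 * revision; a NULL id_firmware matches any revision and a NULL id_model
 * terminates the table.  The entries below are placeholders:
 */
#if 0
static const struct drive_list_entry my_quirk_list[] = {
	{ "SOME DRIVE MODEL",	"1.23" },
	{ "ANOTHER MODEL",	NULL },		/* any firmware revision */
	{ NULL,			NULL }
};

if (ide_in_drive_list(drive->id, my_quirk_list))
	/* apply the workaround */ ;
#endif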
1347 #ifdef CONFIG_BLK_DEV_IDEDMA
1348 int ide_dma_good_drive(ide_drive_t *);
1349 int __ide_dma_bad_drive(ide_drive_t *);
1350
1351 u8 ide_find_dma_mode(ide_drive_t *, u8);
1352
1353 static inline u8 ide_max_dma_mode(ide_drive_t *drive)
1354 {
1355 return ide_find_dma_mode(drive, XFER_UDMA_6);
1356 }
1357
1358 void ide_dma_off_quietly(ide_drive_t *);
1359 void ide_dma_off(ide_drive_t *);
1360 void ide_dma_on(ide_drive_t *);
1361 int ide_set_dma(ide_drive_t *);
1362 void ide_check_dma_crc(ide_drive_t *);
1363 ide_startstop_t ide_dma_intr(ide_drive_t *);
1364
1365 int ide_allocate_dma_engine(ide_hwif_t *);
1366 void ide_release_dma_engine(ide_hwif_t *);
1367
1368 int ide_dma_prepare(ide_drive_t *, struct ide_cmd *);
1369 void ide_dma_unmap_sg(ide_drive_t *, struct ide_cmd *);
1370
1371 #ifdef CONFIG_BLK_DEV_IDEDMA_SFF
1372 int config_drive_for_dma(ide_drive_t *);
1373 int ide_build_dmatable(ide_drive_t *, struct ide_cmd *);
1374 void ide_dma_host_set(ide_drive_t *, int);
1375 int ide_dma_setup(ide_drive_t *, struct ide_cmd *);
1376 extern void ide_dma_start(ide_drive_t *);
1377 int ide_dma_end(ide_drive_t *);
1378 int ide_dma_test_irq(ide_drive_t *);
1379 int ide_dma_sff_timer_expiry(ide_drive_t *);
1380 u8 ide_dma_sff_read_status(ide_hwif_t *);
1381 extern const struct ide_dma_ops sff_dma_ops;
1382 #else
1383 static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
1384 #endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
1385
1386 void ide_dma_lost_irq(ide_drive_t *);
1387 ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int);
1388
1389 #else
1390 static inline u8 ide_find_dma_mode(ide_drive_t *drive, u8 speed) { return 0; }
1391 static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; }
1392 static inline void ide_dma_off_quietly(ide_drive_t *drive) { ; }
1393 static inline void ide_dma_off(ide_drive_t *drive) { ; }
1394 static inline void ide_dma_on(ide_drive_t *drive) { ; }
1395 static inline void ide_dma_verbose(ide_drive_t *drive) { ; }
1396 static inline int ide_set_dma(ide_drive_t *drive) { return 1; }
1397 static inline void ide_check_dma_crc(ide_drive_t *drive) { ; }
1398 static inline ide_startstop_t ide_dma_intr(ide_drive_t *drive) { return ide_stopped; }
1399 static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; }
1400 static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; }
1401 static inline int ide_dma_prepare(ide_drive_t *drive,
1402 struct ide_cmd *cmd) { return 1; }
1403 static inline void ide_dma_unmap_sg(ide_drive_t *drive,
1404 struct ide_cmd *cmd) { ; }
1405 #endif /* CONFIG_BLK_DEV_IDEDMA */
1406
1407 #ifdef CONFIG_BLK_DEV_IDEACPI
1408 int ide_acpi_init(void);
1409 bool ide_port_acpi(ide_hwif_t *hwif);
1410 extern int ide_acpi_exec_tfs(ide_drive_t *drive);
1411 extern void ide_acpi_get_timing(ide_hwif_t *hwif);
1412 extern void ide_acpi_push_timing(ide_hwif_t *hwif);
1413 void ide_acpi_init_port(ide_hwif_t *);
1414 void ide_acpi_port_init_devices(ide_hwif_t *);
1415 extern void ide_acpi_set_state(ide_hwif_t *hwif, int on);
1416 #else
1417 static inline int ide_acpi_init(void) { return 0; }
1418 static inline bool ide_port_acpi(ide_hwif_t *hwif) { return 0; }
1419 static inline int ide_acpi_exec_tfs(ide_drive_t *drive) { return 0; }
1420 static inline void ide_acpi_get_timing(ide_hwif_t *hwif) { ; }
1421 static inline void ide_acpi_push_timing(ide_hwif_t *hwif) { ; }
1422 static inline void ide_acpi_init_port(ide_hwif_t *hwif) { ; }
1423 static inline void ide_acpi_port_init_devices(ide_hwif_t *hwif) { ; }
1424 static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {}
1425 #endif
1426
1427 void ide_register_region(struct gendisk *);
1428 void ide_unregister_region(struct gendisk *);
1429
1430 void ide_check_nien_quirk_list(ide_drive_t *);
1431 void ide_undecoded_slave(ide_drive_t *);
1432
1433 void ide_port_apply_params(ide_hwif_t *);
1434 int ide_sysfs_register_port(ide_hwif_t *);
1435
1436 struct ide_host *ide_host_alloc(const struct ide_port_info *, struct ide_hw **,
1437 unsigned int);
1438 void ide_host_free(struct ide_host *);
1439 int ide_host_register(struct ide_host *, const struct ide_port_info *,
1440 struct ide_hw **);
1441 int ide_host_add(const struct ide_port_info *, struct ide_hw **, unsigned int,
1442 struct ide_host **);
1443 void ide_host_remove(struct ide_host *);
1444 int ide_legacy_device_add(const struct ide_port_info *, unsigned long);
1445 void ide_port_unregister_devices(ide_hwif_t *);
1446 void ide_port_scan(ide_hwif_t *);
1447
1448 static inline void *ide_get_hwifdata (ide_hwif_t * hwif)
1449 {
1450 return hwif->hwif_data;
1451 }
1452
1453 static inline void ide_set_hwifdata (ide_hwif_t * hwif, void *data)
1454 {
1455 hwif->hwif_data = data;
1456 }
1457
1458 extern void ide_toggle_bounce(ide_drive_t *drive, int on);
1459
1460 u64 ide_get_lba_addr(struct ide_cmd *, int);
1461 u8 ide_dump_status(ide_drive_t *, const char *, u8);
1462
1463 struct ide_timing {
1464 u8 mode;
1465 u8 setup; /* t1 */
1466 u16 act8b; /* t2 for 8-bit io */
1467 u16 rec8b; /* t2i for 8-bit io */
1468 u16 cyc8b; /* t0 for 8-bit io */
1469 u16 active; /* t2 or tD */
1470 u16 recover; /* t2i or tK */
1471 u16 cycle; /* t0 */
1472 u16 udma; /* t2CYCTYP/2 */
1473 };
1474
1475 enum {
1476 IDE_TIMING_SETUP = (1 << 0),
1477 IDE_TIMING_ACT8B = (1 << 1),
1478 IDE_TIMING_REC8B = (1 << 2),
1479 IDE_TIMING_CYC8B = (1 << 3),
1480 IDE_TIMING_8BIT = IDE_TIMING_ACT8B | IDE_TIMING_REC8B |
1481 IDE_TIMING_CYC8B,
1482 IDE_TIMING_ACTIVE = (1 << 4),
1483 IDE_TIMING_RECOVER = (1 << 5),
1484 IDE_TIMING_CYCLE = (1 << 6),
1485 IDE_TIMING_UDMA = (1 << 7),
1486 IDE_TIMING_ALL = IDE_TIMING_SETUP | IDE_TIMING_8BIT |
1487 IDE_TIMING_ACTIVE | IDE_TIMING_RECOVER |
1488 IDE_TIMING_CYCLE | IDE_TIMING_UDMA,
1489 };
1490
1491 struct ide_timing *ide_timing_find_mode(u8);
1492 u16 ide_pio_cycle_time(ide_drive_t *, u8);
1493 void ide_timing_merge(struct ide_timing *, struct ide_timing *,
1494 struct ide_timing *, unsigned int);
1495 int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int);
1496
1497 #ifdef CONFIG_IDE_XFER_MODE
1498 int ide_scan_pio_blacklist(char *);
1499 const char *ide_xfer_verbose(u8);
1500 int ide_pio_need_iordy(ide_drive_t *, const u8);
1501 int ide_set_pio_mode(ide_drive_t *, u8);
1502 int ide_set_dma_mode(ide_drive_t *, u8);
1503 void ide_set_pio(ide_drive_t *, u8);
1504 int ide_set_xfer_rate(ide_drive_t *, u8);
1505 #else
1506 static inline void ide_set_pio(ide_drive_t *drive, u8 pio) { ; }
1507 static inline int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) { return -1; }
1508 #endif
1509
1510 static inline void ide_set_max_pio(ide_drive_t *drive)
1511 {
1512 ide_set_pio(drive, 255);
1513 }
1514
1515 char *ide_media_string(ide_drive_t *);
1516
1517 extern struct device_attribute ide_dev_attrs[];
1518 extern struct bus_type ide_bus_type;
1519 extern struct class *ide_port_class;
1520
1521 static inline void ide_dump_identify(u8 *id)
1522 {
1523 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 2, id, 512, 0);
1524 }
1525
1526 static inline int hwif_to_node(ide_hwif_t *hwif)
1527 {
1528 return hwif->dev ? dev_to_node(hwif->dev) : -1;
1529 }
1530
1531 static inline ide_drive_t *ide_get_pair_dev(ide_drive_t *drive)
1532 {
1533 ide_drive_t *peer = drive->hwif->devices[(drive->dn ^ 1) & 1];
1534
1535 return (peer->dev_flags & IDE_DFLAG_PRESENT) ? peer : NULL;
1536 }
1537
1538 static inline void *ide_get_drivedata(ide_drive_t *drive)
1539 {
1540 return drive->drive_data;
1541 }
1542
1543 static inline void ide_set_drivedata(ide_drive_t *drive, void *data)
1544 {
1545 drive->drive_data = data;
1546 }
1547
1548 #define ide_port_for_each_dev(i, dev, port) \
1549 for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++)
1550
1551 #define ide_port_for_each_present_dev(i, dev, port) \
1552 for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++) \
1553 if ((dev)->dev_flags & IDE_DFLAG_PRESENT)
1554
1555 #define ide_host_for_each_port(i, port, host) \
1556 for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++)
1557
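/*
 * Editor's illustrative sketch (not part of the original header): the
 * iterators above walk a port's device slots and a host's ports; "i" and
 * "dev" are ordinary loop variables provided by the caller.
 */
#if 0
ide_drive_t *dev;
int i;

ide_port_for_each_present_dev(i, dev, hwif) {
	/* runs only for slots whose device is flagged IDE_DFLAG_PRESENT */
}
#endif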
1558 #endif /* _IDE_H */