[libata] ahci: Match PCI class code for AHCI
drivers/ata/ahci.c
1 /*
2 * ahci.c - AHCI SATA support
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2004-2005 Red Hat, Inc.
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
24 *
25 *
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
28 *
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
32 *
33 */
34
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/sched.h>
43 #include <linux/dma-mapping.h>
44 #include <linux/device.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_cmnd.h>
47 #include <linux/libata.h>
48 #include <asm/io.h>
49
50 #define DRV_NAME "ahci"
51 #define DRV_VERSION "2.0"
52
53
54 enum {
55 AHCI_PCI_BAR = 5,
56 AHCI_MAX_SG = 168, /* hardware max is 64K */
57 AHCI_DMA_BOUNDARY = 0xffffffff,
58 AHCI_USE_CLUSTERING = 0,
59 AHCI_MAX_CMDS = 32,
60 AHCI_CMD_SZ = 32,
61 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
62 AHCI_RX_FIS_SZ = 256,
63 AHCI_CMD_TBL_CDB = 0x40,
64 AHCI_CMD_TBL_HDR_SZ = 0x80,
65 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
66 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
67 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
68 AHCI_RX_FIS_SZ,
69 AHCI_IRQ_ON_SG = (1 << 31),
70 AHCI_CMD_ATAPI = (1 << 5),
71 AHCI_CMD_WRITE = (1 << 6),
72 AHCI_CMD_PREFETCH = (1 << 7),
73 AHCI_CMD_RESET = (1 << 8),
74 AHCI_CMD_CLR_BUSY = (1 << 10),
75
76 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
77 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
78
79 board_ahci = 0,
80 board_ahci_vt8251 = 1,
81 board_ahci_ign_iferr = 2,
82
83 /* global controller registers */
84 HOST_CAP = 0x00, /* host capabilities */
85 HOST_CTL = 0x04, /* global host control */
86 HOST_IRQ_STAT = 0x08, /* interrupt status */
87 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
88 HOST_VERSION = 0x10, /* AHCI spec. version compliance */
89
90 /* HOST_CTL bits */
91 HOST_RESET = (1 << 0), /* reset controller; self-clear */
92 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
93 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
94
95 /* HOST_CAP bits */
96 HOST_CAP_SSC = (1 << 14), /* Slumber capable */
97 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
98 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
99 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
100 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
101
102 /* registers for each SATA port */
103 PORT_LST_ADDR = 0x00, /* command list DMA addr */
104 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
105 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
106 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
107 PORT_IRQ_STAT = 0x10, /* interrupt status */
108 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
109 PORT_CMD = 0x18, /* port command */
110 PORT_TFDATA = 0x20, /* taskfile data */
111 PORT_SIG = 0x24, /* device TF signature */
112 PORT_CMD_ISSUE = 0x38, /* command issue */
113 PORT_SCR = 0x28, /* SATA phy register block */
114 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
115 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
116 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
117 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
118
119 /* PORT_IRQ_{STAT,MASK} bits */
120 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
121 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
122 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
123 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
124 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
125 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
126 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
127 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
128
129 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
130 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
131 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
132 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
133 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
134 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
135 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
136 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
137 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
138
139 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
140 PORT_IRQ_IF_ERR |
141 PORT_IRQ_CONNECT |
142 PORT_IRQ_PHYRDY |
143 PORT_IRQ_UNK_FIS,
144 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
145 PORT_IRQ_TF_ERR |
146 PORT_IRQ_HBUS_DATA_ERR,
147 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
148 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
149 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
150
151 /* PORT_CMD bits */
152 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
153 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
154 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
155 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
156 PORT_CMD_CLO = (1 << 3), /* Command list override */
157 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
158 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
159 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
160
161 PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
162 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
163 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
164 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
165
166 /* hpriv->flags bits */
167 AHCI_FLAG_MSI = (1 << 0),
168
169 /* ap->flags bits */
170 AHCI_FLAG_NO_NCQ = (1 << 24),
171 AHCI_FLAG_IGN_IRQ_IF_ERR = (1 << 25), /* ignore IRQ_IF_ERR */
172 };
173
174 struct ahci_cmd_hdr {
175 u32 opts;
176 u32 status;
177 u32 tbl_addr;
178 u32 tbl_addr_hi;
179 u32 reserved[4];
180 };
181
182 struct ahci_sg {
183 u32 addr;
184 u32 addr_hi;
185 u32 reserved;
186 u32 flags_size;
187 };
188
189 struct ahci_host_priv {
190 unsigned long flags;
191 u32 cap; /* cache of HOST_CAP register */
192 u32 port_map; /* cache of HOST_PORTS_IMPL reg */
193 };
194
195 struct ahci_port_priv {
196 struct ahci_cmd_hdr *cmd_slot;
197 dma_addr_t cmd_slot_dma;
198 void *cmd_tbl;
199 dma_addr_t cmd_tbl_dma;
200 void *rx_fis;
201 dma_addr_t rx_fis_dma;
202 };
203
204 static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
205 static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
206 static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
207 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
208 static irqreturn_t ahci_interrupt (int irq, void *dev_instance);
209 static void ahci_irq_clear(struct ata_port *ap);
210 static int ahci_port_start(struct ata_port *ap);
211 static void ahci_port_stop(struct ata_port *ap);
212 static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
213 static void ahci_qc_prep(struct ata_queued_cmd *qc);
214 static u8 ahci_check_status(struct ata_port *ap);
215 static void ahci_freeze(struct ata_port *ap);
216 static void ahci_thaw(struct ata_port *ap);
217 static void ahci_error_handler(struct ata_port *ap);
218 static void ahci_vt8251_error_handler(struct ata_port *ap);
219 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
220 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
221 static int ahci_port_resume(struct ata_port *ap);
222 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
223 static int ahci_pci_device_resume(struct pci_dev *pdev);
224 static void ahci_remove_one (struct pci_dev *pdev);
225
226 static struct scsi_host_template ahci_sht = {
227 .module = THIS_MODULE,
228 .name = DRV_NAME,
229 .ioctl = ata_scsi_ioctl,
230 .queuecommand = ata_scsi_queuecmd,
231 .change_queue_depth = ata_scsi_change_queue_depth,
232 .can_queue = AHCI_MAX_CMDS - 1,
233 .this_id = ATA_SHT_THIS_ID,
234 .sg_tablesize = AHCI_MAX_SG,
235 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
236 .emulated = ATA_SHT_EMULATED,
237 .use_clustering = AHCI_USE_CLUSTERING,
238 .proc_name = DRV_NAME,
239 .dma_boundary = AHCI_DMA_BOUNDARY,
240 .slave_configure = ata_scsi_slave_config,
241 .slave_destroy = ata_scsi_slave_destroy,
242 .bios_param = ata_std_bios_param,
243 .suspend = ata_scsi_device_suspend,
244 .resume = ata_scsi_device_resume,
245 };
246
247 static const struct ata_port_operations ahci_ops = {
248 .port_disable = ata_port_disable,
249
250 .check_status = ahci_check_status,
251 .check_altstatus = ahci_check_status,
252 .dev_select = ata_noop_dev_select,
253
254 .tf_read = ahci_tf_read,
255
256 .qc_prep = ahci_qc_prep,
257 .qc_issue = ahci_qc_issue,
258
259 .irq_handler = ahci_interrupt,
260 .irq_clear = ahci_irq_clear,
261
262 .scr_read = ahci_scr_read,
263 .scr_write = ahci_scr_write,
264
265 .freeze = ahci_freeze,
266 .thaw = ahci_thaw,
267
268 .error_handler = ahci_error_handler,
269 .post_internal_cmd = ahci_post_internal_cmd,
270
271 .port_suspend = ahci_port_suspend,
272 .port_resume = ahci_port_resume,
273
274 .port_start = ahci_port_start,
275 .port_stop = ahci_port_stop,
276 };
277
278 static const struct ata_port_operations ahci_vt8251_ops = {
279 .port_disable = ata_port_disable,
280
281 .check_status = ahci_check_status,
282 .check_altstatus = ahci_check_status,
283 .dev_select = ata_noop_dev_select,
284
285 .tf_read = ahci_tf_read,
286
287 .qc_prep = ahci_qc_prep,
288 .qc_issue = ahci_qc_issue,
289
290 .irq_handler = ahci_interrupt,
291 .irq_clear = ahci_irq_clear,
292
293 .scr_read = ahci_scr_read,
294 .scr_write = ahci_scr_write,
295
296 .freeze = ahci_freeze,
297 .thaw = ahci_thaw,
298
299 .error_handler = ahci_vt8251_error_handler,
300 .post_internal_cmd = ahci_post_internal_cmd,
301
302 .port_suspend = ahci_port_suspend,
303 .port_resume = ahci_port_resume,
304
305 .port_start = ahci_port_start,
306 .port_stop = ahci_port_stop,
307 };
308
309 static const struct ata_port_info ahci_port_info[] = {
310 /* board_ahci */
311 {
312 .sht = &ahci_sht,
313 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
314 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
315 ATA_FLAG_SKIP_D2H_BSY,
316 .pio_mask = 0x1f, /* pio0-4 */
317 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
318 .port_ops = &ahci_ops,
319 },
320 /* board_ahci_vt8251 */
321 {
322 .sht = &ahci_sht,
323 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
324 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
325 ATA_FLAG_SKIP_D2H_BSY |
326 ATA_FLAG_HRST_TO_RESUME | AHCI_FLAG_NO_NCQ,
327 .pio_mask = 0x1f, /* pio0-4 */
328 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
329 .port_ops = &ahci_vt8251_ops,
330 },
331 /* board_ahci_ign_iferr */
332 {
333 .sht = &ahci_sht,
334 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
335 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
336 ATA_FLAG_SKIP_D2H_BSY |
337 AHCI_FLAG_IGN_IRQ_IF_ERR,
338 .pio_mask = 0x1f, /* pio0-4 */
339 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
340 .port_ops = &ahci_ops,
341 },
342 };
343
344 static const struct pci_device_id ahci_pci_tbl[] = {
345 /* Intel */
346 { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
347 { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
348 { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
349 { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
350 { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
351 { PCI_VDEVICE(AL, 0x5288), board_ahci }, /* ULi M5288 */
352 { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
353 { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
354 { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
355 { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
356 { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
357 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
358 { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
359 { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
360 { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
361 { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
362 { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
363 { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
364 { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
365 { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
366 { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
367 { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
368 { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
369 { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
370 { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
371 { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
372
373 /* JMicron */
374 { PCI_VDEVICE(JMICRON, 0x2360), board_ahci_ign_iferr }, /* JMB360 */
375 { PCI_VDEVICE(JMICRON, 0x2361), board_ahci_ign_iferr }, /* JMB361 */
376 { PCI_VDEVICE(JMICRON, 0x2363), board_ahci_ign_iferr }, /* JMB363 */
377 { PCI_VDEVICE(JMICRON, 0x2365), board_ahci_ign_iferr }, /* JMB365 */
378 { PCI_VDEVICE(JMICRON, 0x2366), board_ahci_ign_iferr }, /* JMB366 */
379
380 /* ATI */
381 { PCI_VDEVICE(ATI, 0x4380), board_ahci }, /* ATI SB600 non-raid */
382 { PCI_VDEVICE(ATI, 0x4381), board_ahci }, /* ATI SB600 raid */
383
384 /* VIA */
385 { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
386
387 /* NVIDIA */
388 { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci }, /* MCP65 */
389 { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci }, /* MCP65 */
390 { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci }, /* MCP65 */
391 { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci }, /* MCP65 */
392 { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci }, /* MCP67 */
393 { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci }, /* MCP67 */
394 { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci }, /* MCP67 */
395 { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci }, /* MCP67 */
396 { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci }, /* MCP67 */
397 { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci }, /* MCP67 */
398 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci }, /* MCP67 */
399 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci }, /* MCP67 */
400
401 /* SiS */
402 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
403 { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 966 */
404 { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
405
406 /* Generic, PCI class code for AHCI */
407 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
408 0x010601, 0xffffff, board_ahci },
409
410 { } /* terminate list */
411 };
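/*
 * The catch-all entry above matches on PCI class 0x0106 with
 * programming interface 0x01 (mask 0xffffff), i.e. any controller
 * that reports itself as an AHCI-mode SATA controller, so new chips
 * work without needing an explicit vendor/device ID in this table.
 */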
412
413
414 static struct pci_driver ahci_pci_driver = {
415 .name = DRV_NAME,
416 .id_table = ahci_pci_tbl,
417 .probe = ahci_init_one,
418 .suspend = ahci_pci_device_suspend,
419 .resume = ahci_pci_device_resume,
420 .remove = ahci_remove_one,
421 };
422
423
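/*
 * Per the AHCI register map, the global (HBA) registers occupy the
 * first 0x100 bytes of the ABAR and each implemented port then gets
 * an 0x80-byte register block of its own, hence base + 0x100 +
 * port * 0x80 below.
 */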
424 static inline unsigned long ahci_port_base_ul (unsigned long base, unsigned int port)
425 {
426 return base + 0x100 + (port * 0x80);
427 }
428
429 static inline void __iomem *ahci_port_base (void __iomem *base, unsigned int port)
430 {
431 return (void __iomem *) ahci_port_base_ul((unsigned long)base, port);
432 }
433
434 static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
435 {
436 unsigned int sc_reg;
437
438 switch (sc_reg_in) {
439 case SCR_STATUS: sc_reg = 0; break;
440 case SCR_CONTROL: sc_reg = 1; break;
441 case SCR_ERROR: sc_reg = 2; break;
442 case SCR_ACTIVE: sc_reg = 3; break;
443 default:
444 return 0xffffffffU;
445 }
446
447 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
448 }
449
450
451 static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
452 u32 val)
453 {
454 unsigned int sc_reg;
455
456 switch (sc_reg_in) {
457 case SCR_STATUS: sc_reg = 0; break;
458 case SCR_CONTROL: sc_reg = 1; break;
459 case SCR_ERROR: sc_reg = 2; break;
460 case SCR_ACTIVE: sc_reg = 3; break;
461 default:
462 return;
463 }
464
465 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
466 }
467
468 static void ahci_start_engine(void __iomem *port_mmio)
469 {
470 u32 tmp;
471
472 /* start DMA */
473 tmp = readl(port_mmio + PORT_CMD);
474 tmp |= PORT_CMD_START;
475 writel(tmp, port_mmio + PORT_CMD);
476 readl(port_mmio + PORT_CMD); /* flush */
477 }
478
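/*
 * Stopping the port DMA engine is a handshake: software clears
 * PxCMD.ST (PORT_CMD_START) and waits for the HBA to acknowledge by
 * clearing PxCMD.CR (PORT_CMD_LIST_ON), which may take up to 500 ms.
 */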
479 static int ahci_stop_engine(void __iomem *port_mmio)
480 {
481 u32 tmp;
482
483 tmp = readl(port_mmio + PORT_CMD);
484
485 /* check if the HBA is idle */
486 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
487 return 0;
488
489 /* setting HBA to idle */
490 tmp &= ~PORT_CMD_START;
491 writel(tmp, port_mmio + PORT_CMD);
492
493 /* wait for engine to stop. This could be as long as 500 msec */
494 tmp = ata_wait_register(port_mmio + PORT_CMD,
495 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
496 if (tmp & PORT_CMD_LIST_ON)
497 return -EIO;
498
499 return 0;
500 }
501
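/*
 * Program the command-list and received-FIS base addresses before
 * turning on FIS reception.  The (addr >> 16) >> 16 construct yields
 * the upper 32 bits without performing a shift by 32, which would be
 * undefined when dma_addr_t is only 32 bits wide.
 */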
502 static void ahci_start_fis_rx(void __iomem *port_mmio, u32 cap,
503 dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
504 {
505 u32 tmp;
506
507 /* set FIS registers */
508 if (cap & HOST_CAP_64)
509 writel((cmd_slot_dma >> 16) >> 16, port_mmio + PORT_LST_ADDR_HI);
510 writel(cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
511
512 if (cap & HOST_CAP_64)
513 writel((rx_fis_dma >> 16) >> 16, port_mmio + PORT_FIS_ADDR_HI);
514 writel(rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
515
516 /* enable FIS reception */
517 tmp = readl(port_mmio + PORT_CMD);
518 tmp |= PORT_CMD_FIS_RX;
519 writel(tmp, port_mmio + PORT_CMD);
520
521 /* flush */
522 readl(port_mmio + PORT_CMD);
523 }
524
525 static int ahci_stop_fis_rx(void __iomem *port_mmio)
526 {
527 u32 tmp;
528
529 /* disable FIS reception */
530 tmp = readl(port_mmio + PORT_CMD);
531 tmp &= ~PORT_CMD_FIS_RX;
532 writel(tmp, port_mmio + PORT_CMD);
533
534 /* wait for completion, spec says 500ms, give it 1000 */
535 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
536 PORT_CMD_FIS_ON, 10, 1000);
537 if (tmp & PORT_CMD_FIS_ON)
538 return -EBUSY;
539
540 return 0;
541 }
542
543 static void ahci_power_up(void __iomem *port_mmio, u32 cap)
544 {
545 u32 cmd;
546
547 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
548
549 /* spin up device */
550 if (cap & HOST_CAP_SSS) {
551 cmd |= PORT_CMD_SPIN_UP;
552 writel(cmd, port_mmio + PORT_CMD);
553 }
554
555 /* wake up link */
556 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
557 }
558
559 static void ahci_power_down(void __iomem *port_mmio, u32 cap)
560 {
561 u32 cmd, scontrol;
562
563 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
564
565 if (cap & HOST_CAP_SSC) {
566 /* enable transitions to slumber mode */
567 scontrol = readl(port_mmio + PORT_SCR_CTL);
568 if ((scontrol & 0x0f00) > 0x100) {
569 scontrol &= ~0xf00;
570 writel(scontrol, port_mmio + PORT_SCR_CTL);
571 }
572
573 /* put device into slumber mode */
574 writel(cmd | PORT_CMD_ICC_SLUMBER, port_mmio + PORT_CMD);
575
576 /* wait for the transition to complete */
577 ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_ICC_SLUMBER,
578 PORT_CMD_ICC_SLUMBER, 1, 50);
579 }
580
581 /* put device into listen mode */
582 if (cap & HOST_CAP_SSS) {
583 /* first set PxSCTL.DET to 0 */
584 scontrol = readl(port_mmio + PORT_SCR_CTL);
585 scontrol &= ~0xf;
586 writel(scontrol, port_mmio + PORT_SCR_CTL);
587
588 /* then set PxCMD.SUD to 0 */
589 cmd &= ~PORT_CMD_SPIN_UP;
590 writel(cmd, port_mmio + PORT_CMD);
591 }
592 }
593
594 static void ahci_init_port(void __iomem *port_mmio, u32 cap,
595 dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
596 {
597 /* power up */
598 ahci_power_up(port_mmio, cap);
599
600 /* enable FIS reception */
601 ahci_start_fis_rx(port_mmio, cap, cmd_slot_dma, rx_fis_dma);
602
603 /* enable DMA */
604 ahci_start_engine(port_mmio);
605 }
606
607 static int ahci_deinit_port(void __iomem *port_mmio, u32 cap, const char **emsg)
608 {
609 int rc;
610
611 /* disable DMA */
612 rc = ahci_stop_engine(port_mmio);
613 if (rc) {
614 *emsg = "failed to stop engine";
615 return rc;
616 }
617
618 /* disable FIS reception */
619 rc = ahci_stop_fis_rx(port_mmio);
620 if (rc) {
621 *emsg = "failed to stop FIS RX";
622 return rc;
623 }
624
625 /* put device into slumber mode */
626 ahci_power_down(port_mmio, cap);
627
628 return 0;
629 }
630
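/*
 * A global reset via HOST_RESET clears most controller state, so the
 * routine below saves a couple of HOST_CAP bits and writes them back
 * afterwards, forcing staggered spin-up (bit 27) on, presumably so
 * that each port can be spun up under driver control.  On Intel
 * controllers, PCI config offset 0x92 is the port control/status (PCS)
 * register; setting its low bits enables the individual SATA ports.
 */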
631 static int ahci_reset_controller(void __iomem *mmio, struct pci_dev *pdev)
632 {
633 u32 cap_save, tmp;
634
635 cap_save = readl(mmio + HOST_CAP);
636 cap_save &= ( (1<<28) | (1<<17) );
637 cap_save |= (1 << 27);
638
639 /* global controller reset */
640 tmp = readl(mmio + HOST_CTL);
641 if ((tmp & HOST_RESET) == 0) {
642 writel(tmp | HOST_RESET, mmio + HOST_CTL);
643 readl(mmio + HOST_CTL); /* flush */
644 }
645
646 /* reset must complete within 1 second, or
647 * the hardware should be considered fried.
648 */
649 ssleep(1);
650
651 tmp = readl(mmio + HOST_CTL);
652 if (tmp & HOST_RESET) {
653 dev_printk(KERN_ERR, &pdev->dev,
654 "controller reset failed (0x%x)\n", tmp);
655 return -EIO;
656 }
657
658 writel(HOST_AHCI_EN, mmio + HOST_CTL);
659 (void) readl(mmio + HOST_CTL); /* flush */
660 writel(cap_save, mmio + HOST_CAP);
661 writel(0xf, mmio + HOST_PORTS_IMPL);
662 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
663
664 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
665 u16 tmp16;
666
667 /* configure PCS */
668 pci_read_config_word(pdev, 0x92, &tmp16);
669 tmp16 |= 0xf;
670 pci_write_config_word(pdev, 0x92, tmp16);
671 }
672
673 return 0;
674 }
675
676 static void ahci_init_controller(void __iomem *mmio, struct pci_dev *pdev,
677 int n_ports, u32 cap)
678 {
679 int i, rc;
680 u32 tmp;
681
682 for (i = 0; i < n_ports; i++) {
683 void __iomem *port_mmio = ahci_port_base(mmio, i);
684 const char *emsg = NULL;
685
686 #if 0 /* BIOSen initialize this incorrectly */
687 if (!(hpriv->port_map & (1 << i)))
688 continue;
689 #endif
690
691 /* make sure port is not active */
692 rc = ahci_deinit_port(port_mmio, cap, &emsg);
693 if (rc)
694 dev_printk(KERN_WARNING, &pdev->dev,
695 "%s (%d)\n", emsg, rc);
696
697 /* clear SError */
698 tmp = readl(port_mmio + PORT_SCR_ERR);
699 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
700 writel(tmp, port_mmio + PORT_SCR_ERR);
701
702 /* clear port IRQ */
703 tmp = readl(port_mmio + PORT_IRQ_STAT);
704 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
705 if (tmp)
706 writel(tmp, port_mmio + PORT_IRQ_STAT);
707
708 writel(1 << i, mmio + HOST_IRQ_STAT);
709 }
710
711 tmp = readl(mmio + HOST_CTL);
712 VPRINTK("HOST_CTL 0x%x\n", tmp);
713 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
714 tmp = readl(mmio + HOST_CTL);
715 VPRINTK("HOST_CTL 0x%x\n", tmp);
716 }
717
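/*
 * PORT_SIG latches the signature from the initial D2H Register FIS
 * sent by the device after reset; rebuilding a taskfile from it lets
 * the generic ata_dev_classify() distinguish ATA from ATAPI devices.
 */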
718 static unsigned int ahci_dev_classify(struct ata_port *ap)
719 {
720 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
721 struct ata_taskfile tf;
722 u32 tmp;
723
724 tmp = readl(port_mmio + PORT_SIG);
725 tf.lbah = (tmp >> 24) & 0xff;
726 tf.lbam = (tmp >> 16) & 0xff;
727 tf.lbal = (tmp >> 8) & 0xff;
728 tf.nsect = (tmp) & 0xff;
729
730 return ata_dev_classify(&tf);
731 }
732
733 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
734 u32 opts)
735 {
736 dma_addr_t cmd_tbl_dma;
737
738 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
739
740 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
741 pp->cmd_slot[tag].status = 0;
742 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
743 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
744 }
745
746 static int ahci_clo(struct ata_port *ap)
747 {
748 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
749 struct ahci_host_priv *hpriv = ap->host->private_data;
750 u32 tmp;
751
752 if (!(hpriv->cap & HOST_CAP_CLO))
753 return -EOPNOTSUPP;
754
755 tmp = readl(port_mmio + PORT_CMD);
756 tmp |= PORT_CMD_CLO;
757 writel(tmp, port_mmio + PORT_CMD);
758
759 tmp = ata_wait_register(port_mmio + PORT_CMD,
760 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
761 if (tmp & PORT_CMD_CLO)
762 return -EIO;
763
764 return 0;
765 }
766
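/*
 * AHCI exposes no legacy taskfile registers, so software reset is
 * performed by issuing two H2D Register FISes with the Command bit
 * cleared: the first sets SRST in the device control field, the
 * second clears it again, per the AHCI-1.1 procedure cited below.
 */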
767 static int ahci_softreset(struct ata_port *ap, unsigned int *class)
768 {
769 struct ahci_port_priv *pp = ap->private_data;
770 void __iomem *mmio = ap->host->mmio_base;
771 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
772 const u32 cmd_fis_len = 5; /* five dwords */
773 const char *reason = NULL;
774 struct ata_taskfile tf;
775 u32 tmp;
776 u8 *fis;
777 int rc;
778
779 DPRINTK("ENTER\n");
780
781 if (ata_port_offline(ap)) {
782 DPRINTK("PHY reports no device\n");
783 *class = ATA_DEV_NONE;
784 return 0;
785 }
786
787 /* prepare for SRST (AHCI-1.1 10.4.1) */
788 rc = ahci_stop_engine(port_mmio);
789 if (rc) {
790 reason = "failed to stop engine";
791 goto fail_restart;
792 }
793
794 /* check BUSY/DRQ, perform Command List Override if necessary */
795 if (ahci_check_status(ap) & (ATA_BUSY | ATA_DRQ)) {
796 rc = ahci_clo(ap);
797
798 if (rc == -EOPNOTSUPP) {
799 reason = "port busy but CLO unavailable";
800 goto fail_restart;
801 } else if (rc) {
802 reason = "port busy but CLO failed";
803 goto fail_restart;
804 }
805 }
806
807 /* restart engine */
808 ahci_start_engine(port_mmio);
809
810 ata_tf_init(ap->device, &tf);
811 fis = pp->cmd_tbl;
812
813 /* issue the first D2H Register FIS */
814 ahci_fill_cmd_slot(pp, 0,
815 cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);
816
817 tf.ctl |= ATA_SRST;
818 ata_tf_to_fis(&tf, fis, 0);
819 fis[1] &= ~(1 << 7); /* turn off Command FIS bit */
820
821 writel(1, port_mmio + PORT_CMD_ISSUE);
822
823 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1, 500);
824 if (tmp & 0x1) {
825 rc = -EIO;
826 reason = "1st FIS failed";
827 goto fail;
828 }
829
830 /* spec says at least 5us, but be generous and sleep for 1ms */
831 msleep(1);
832
833 /* issue the second D2H Register FIS */
834 ahci_fill_cmd_slot(pp, 0, cmd_fis_len);
835
836 tf.ctl &= ~ATA_SRST;
837 ata_tf_to_fis(&tf, fis, 0);
838 fis[1] &= ~(1 << 7); /* turn off Command FIS bit */
839
840 writel(1, port_mmio + PORT_CMD_ISSUE);
841 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
842
843 /* spec mandates ">= 2ms" before checking status.
844 * We wait 150ms, because that was the magic delay used for
845 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
846 * between when the ATA command register is written, and then
847 * status is checked. Because waiting for "a while" before
848 * checking status is fine, post SRST, we perform this magic
849 * delay here as well.
850 */
851 msleep(150);
852
853 *class = ATA_DEV_NONE;
854 if (ata_port_online(ap)) {
855 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
856 rc = -EIO;
857 reason = "device not ready";
858 goto fail;
859 }
860 *class = ahci_dev_classify(ap);
861 }
862
863 DPRINTK("EXIT, class=%u\n", *class);
864 return 0;
865
866 fail_restart:
867 ahci_start_engine(port_mmio);
868 fail:
869 ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
870 return rc;
871 }
872
873 static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
874 {
875 struct ahci_port_priv *pp = ap->private_data;
876 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
877 struct ata_taskfile tf;
878 void __iomem *mmio = ap->host->mmio_base;
879 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
880 int rc;
881
882 DPRINTK("ENTER\n");
883
884 ahci_stop_engine(port_mmio);
885
886 /* clear D2H reception area to properly wait for D2H FIS */
887 ata_tf_init(ap->device, &tf);
888 tf.command = 0xff;
889 ata_tf_to_fis(&tf, d2h_fis, 0);
890
891 rc = sata_std_hardreset(ap, class);
892
893 ahci_start_engine(port_mmio);
894
895 if (rc == 0 && ata_port_online(ap))
896 *class = ahci_dev_classify(ap);
897 if (*class == ATA_DEV_UNKNOWN)
898 *class = ATA_DEV_NONE;
899
900 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
901 return rc;
902 }
903
904 static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class)
905 {
906 void __iomem *mmio = ap->host->mmio_base;
907 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
908 int rc;
909
910 DPRINTK("ENTER\n");
911
912 ahci_stop_engine(port_mmio);
913
914 rc = sata_port_hardreset(ap, sata_ehc_deb_timing(&ap->eh_context));
915
916 /* vt8251 needs SError cleared for the port to operate */
917 ahci_scr_write(ap, SCR_ERROR, ahci_scr_read(ap, SCR_ERROR));
918
919 ahci_start_engine(port_mmio);
920
921 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
922
923 /* vt8251 doesn't clear BSY on signature FIS reception,
924 * request follow-up softreset.
925 */
926 return rc ?: -EAGAIN;
927 }
928
929 static void ahci_postreset(struct ata_port *ap, unsigned int *class)
930 {
931 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
932 u32 new_tmp, tmp;
933
934 ata_std_postreset(ap, class);
935
936 /* Make sure port's ATAPI bit is set appropriately */
937 new_tmp = tmp = readl(port_mmio + PORT_CMD);
938 if (*class == ATA_DEV_ATAPI)
939 new_tmp |= PORT_CMD_ATAPI;
940 else
941 new_tmp &= ~PORT_CMD_ATAPI;
942 if (new_tmp != tmp) {
943 writel(new_tmp, port_mmio + PORT_CMD);
944 readl(port_mmio + PORT_CMD); /* flush */
945 }
946 }
947
948 static u8 ahci_check_status(struct ata_port *ap)
949 {
950 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
951
952 return readl(mmio + PORT_TFDATA) & 0xFF;
953 }
954
955 static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
956 {
957 struct ahci_port_priv *pp = ap->private_data;
958 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
959
960 ata_tf_from_fis(d2h_fis, tf);
961 }
962
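/*
 * Each scatter/gather element becomes one AHCI PRD entry: a 64-bit
 * buffer address plus, in flags_size, the byte count minus one (a
 * single PRD can describe at most 4 MB of data).
 */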
963 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
964 {
965 struct scatterlist *sg;
966 struct ahci_sg *ahci_sg;
967 unsigned int n_sg = 0;
968
969 VPRINTK("ENTER\n");
970
971 /*
972 * Next, the S/G list.
973 */
974 ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
975 ata_for_each_sg(sg, qc) {
976 dma_addr_t addr = sg_dma_address(sg);
977 u32 sg_len = sg_dma_len(sg);
978
979 ahci_sg->addr = cpu_to_le32(addr & 0xffffffff);
980 ahci_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
981 ahci_sg->flags_size = cpu_to_le32(sg_len - 1);
982
983 ahci_sg++;
984 n_sg++;
985 }
986
987 return n_sg;
988 }
989
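/*
 * Per-slot command table layout: the command FIS at offset 0, the
 * ATAPI CDB area at AHCI_CMD_TBL_CDB (0x40) and the PRD table from
 * AHCI_CMD_TBL_HDR_SZ (0x80) onwards.  The opts word built here puts
 * the FIS length (in dwords) in the low bits and the PRD entry count
 * in bits 31:16 before handing it to ahci_fill_cmd_slot().
 */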
990 static void ahci_qc_prep(struct ata_queued_cmd *qc)
991 {
992 struct ata_port *ap = qc->ap;
993 struct ahci_port_priv *pp = ap->private_data;
994 int is_atapi = is_atapi_taskfile(&qc->tf);
995 void *cmd_tbl;
996 u32 opts;
997 const u32 cmd_fis_len = 5; /* five dwords */
998 unsigned int n_elem;
999
1000 /*
1001 * Fill in command table information. First, the header,
1002 * a SATA Register - Host to Device command FIS.
1003 */
1004 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
1005
1006 ata_tf_to_fis(&qc->tf, cmd_tbl, 0);
1007 if (is_atapi) {
1008 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
1009 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
1010 }
1011
1012 n_elem = 0;
1013 if (qc->flags & ATA_QCFLAG_DMAMAP)
1014 n_elem = ahci_fill_sg(qc, cmd_tbl);
1015
1016 /*
1017 * Fill in command slot information.
1018 */
1019 opts = cmd_fis_len | n_elem << 16;
1020 if (qc->tf.flags & ATA_TFLAG_WRITE)
1021 opts |= AHCI_CMD_WRITE;
1022 if (is_atapi)
1023 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
1024
1025 ahci_fill_cmd_slot(pp, qc->tag, opts);
1026 }
1027
1028 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
1029 {
1030 struct ahci_port_priv *pp = ap->private_data;
1031 struct ata_eh_info *ehi = &ap->eh_info;
1032 unsigned int err_mask = 0, action = 0;
1033 struct ata_queued_cmd *qc;
1034 u32 serror;
1035
1036 ata_ehi_clear_desc(ehi);
1037
1038 /* AHCI needs SError cleared; otherwise, it might lock up */
1039 serror = ahci_scr_read(ap, SCR_ERROR);
1040 ahci_scr_write(ap, SCR_ERROR, serror);
1041
1042 /* analyze @irq_stat */
1043 ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
1044
1045 /* some controllers set IRQ_IF_ERR on device errors, ignore it */
1046 if (ap->flags & AHCI_FLAG_IGN_IRQ_IF_ERR)
1047 irq_stat &= ~PORT_IRQ_IF_ERR;
1048
1049 if (irq_stat & PORT_IRQ_TF_ERR)
1050 err_mask |= AC_ERR_DEV;
1051
1052 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
1053 err_mask |= AC_ERR_HOST_BUS;
1054 action |= ATA_EH_SOFTRESET;
1055 }
1056
1057 if (irq_stat & PORT_IRQ_IF_ERR) {
1058 err_mask |= AC_ERR_ATA_BUS;
1059 action |= ATA_EH_SOFTRESET;
1060 ata_ehi_push_desc(ehi, ", interface fatal error");
1061 }
1062
1063 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
1064 ata_ehi_hotplugged(ehi);
1065 ata_ehi_push_desc(ehi, ", %s", irq_stat & PORT_IRQ_CONNECT ?
1066 "connection status changed" : "PHY RDY changed");
1067 }
1068
1069 if (irq_stat & PORT_IRQ_UNK_FIS) {
1070 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
1071
1072 err_mask |= AC_ERR_HSM;
1073 action |= ATA_EH_SOFTRESET;
1074 ata_ehi_push_desc(ehi, ", unknown FIS %08x %08x %08x %08x",
1075 unk[0], unk[1], unk[2], unk[3]);
1076 }
1077
1078 /* okay, let's hand over to EH */
1079 ehi->serror |= serror;
1080 ehi->action |= action;
1081
1082 qc = ata_qc_from_tag(ap, ap->active_tag);
1083 if (qc)
1084 qc->err_mask |= err_mask;
1085 else
1086 ehi->err_mask |= err_mask;
1087
1088 if (irq_stat & PORT_IRQ_FREEZE)
1089 ata_port_freeze(ap);
1090 else
1091 ata_port_abort(ap);
1092 }
1093
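/*
 * Completion detection: outstanding NCQ commands are tracked in
 * PxSACT while non-NCQ commands are tracked in PxCI, so which
 * register supplies qc_active depends on whether the port currently
 * has NCQ commands in flight.
 */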
1094 static void ahci_host_intr(struct ata_port *ap)
1095 {
1096 void __iomem *mmio = ap->host->mmio_base;
1097 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1098 struct ata_eh_info *ehi = &ap->eh_info;
1099 u32 status, qc_active;
1100 int rc;
1101
1102 status = readl(port_mmio + PORT_IRQ_STAT);
1103 writel(status, port_mmio + PORT_IRQ_STAT);
1104
1105 if (unlikely(status & PORT_IRQ_ERROR)) {
1106 ahci_error_intr(ap, status);
1107 return;
1108 }
1109
1110 if (ap->sactive)
1111 qc_active = readl(port_mmio + PORT_SCR_ACT);
1112 else
1113 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
1114
1115 rc = ata_qc_complete_multiple(ap, qc_active, NULL);
1116 if (rc > 0)
1117 return;
1118 if (rc < 0) {
1119 ehi->err_mask |= AC_ERR_HSM;
1120 ehi->action |= ATA_EH_SOFTRESET;
1121 ata_port_freeze(ap);
1122 return;
1123 }
1124
1125 /* hmmm... a spurious interrupt */
1126
1127 /* some devices send D2H reg with I bit set during NCQ command phase */
1128 if (ap->sactive && (status & PORT_IRQ_D2H_REG_FIS))
1129 return;
1130
1131 /* ignore interim PIO setup fis interrupts */
1132 if (ata_tag_valid(ap->active_tag) && (status & PORT_IRQ_PIOS_FIS))
1133 return;
1134
1135 if (ata_ratelimit())
1136 ata_port_printk(ap, KERN_INFO, "spurious interrupt "
1137 "(irq_stat 0x%x active_tag %d sactive 0x%x)\n",
1138 status, ap->active_tag, ap->sactive);
1139 }
1140
1141 static void ahci_irq_clear(struct ata_port *ap)
1142 {
1143 /* TODO */
1144 }
1145
1146 static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
1147 {
1148 struct ata_host *host = dev_instance;
1149 struct ahci_host_priv *hpriv;
1150 unsigned int i, handled = 0;
1151 void __iomem *mmio;
1152 u32 irq_stat, irq_ack = 0;
1153
1154 VPRINTK("ENTER\n");
1155
1156 hpriv = host->private_data;
1157 mmio = host->mmio_base;
1158
1159 /* sigh. 0xffffffff is a valid return from h/w */
1160 irq_stat = readl(mmio + HOST_IRQ_STAT);
1161 irq_stat &= hpriv->port_map;
1162 if (!irq_stat)
1163 return IRQ_NONE;
1164
1165 spin_lock(&host->lock);
1166
1167 for (i = 0; i < host->n_ports; i++) {
1168 struct ata_port *ap;
1169
1170 if (!(irq_stat & (1 << i)))
1171 continue;
1172
1173 ap = host->ports[i];
1174 if (ap) {
1175 ahci_host_intr(ap);
1176 VPRINTK("port %u\n", i);
1177 } else {
1178 VPRINTK("port %u (no irq)\n", i);
1179 if (ata_ratelimit())
1180 dev_printk(KERN_WARNING, host->dev,
1181 "interrupt on disabled port %u\n", i);
1182 }
1183
1184 irq_ack |= (1 << i);
1185 }
1186
1187 if (irq_ack) {
1188 writel(irq_ack, mmio + HOST_IRQ_STAT);
1189 handled = 1;
1190 }
1191
1192 spin_unlock(&host->lock);
1193
1194 VPRINTK("EXIT\n");
1195
1196 return IRQ_RETVAL(handled);
1197 }
1198
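/*
 * For NCQ commands the tag bit must be set in PxSACT before the same
 * bit is written to PxCI; the final read of PORT_CMD_ISSUE merely
 * flushes the posted MMIO write.
 */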
1199 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
1200 {
1201 struct ata_port *ap = qc->ap;
1202 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
1203
1204 if (qc->tf.protocol == ATA_PROT_NCQ)
1205 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
1206 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
1207 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
1208
1209 return 0;
1210 }
1211
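/*
 * "Freezing" a port just masks all of its interrupt sources; events
 * continue to latch in PORT_IRQ_STAT and are cleared again when the
 * port is thawed and DEF_PORT_IRQ is restored.
 */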
1212 static void ahci_freeze(struct ata_port *ap)
1213 {
1214 void __iomem *mmio = ap->host->mmio_base;
1215 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1216
1217 /* turn IRQ off */
1218 writel(0, port_mmio + PORT_IRQ_MASK);
1219 }
1220
1221 static void ahci_thaw(struct ata_port *ap)
1222 {
1223 void __iomem *mmio = ap->host->mmio_base;
1224 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1225 u32 tmp;
1226
1227 /* clear IRQ */
1228 tmp = readl(port_mmio + PORT_IRQ_STAT);
1229 writel(tmp, port_mmio + PORT_IRQ_STAT);
1230 writel(1 << ap->id, mmio + HOST_IRQ_STAT);
1231
1232 /* turn IRQ back on */
1233 writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
1234 }
1235
1236 static void ahci_error_handler(struct ata_port *ap)
1237 {
1238 void __iomem *mmio = ap->host->mmio_base;
1239 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1240
1241 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
1242 /* restart engine */
1243 ahci_stop_engine(port_mmio);
1244 ahci_start_engine(port_mmio);
1245 }
1246
1247 /* perform recovery */
1248 ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_hardreset,
1249 ahci_postreset);
1250 }
1251
1252 static void ahci_vt8251_error_handler(struct ata_port *ap)
1253 {
1254 void __iomem *mmio = ap->host->mmio_base;
1255 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1256
1257 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
1258 /* restart engine */
1259 ahci_stop_engine(port_mmio);
1260 ahci_start_engine(port_mmio);
1261 }
1262
1263 /* perform recovery */
1264 ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_vt8251_hardreset,
1265 ahci_postreset);
1266 }
1267
1268 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
1269 {
1270 struct ata_port *ap = qc->ap;
1271 void __iomem *mmio = ap->host->mmio_base;
1272 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1273
1274 if (qc->flags & ATA_QCFLAG_FAILED)
1275 qc->err_mask |= AC_ERR_OTHER;
1276
1277 if (qc->err_mask) {
1278 /* make DMA engine forget about the failed command */
1279 ahci_stop_engine(port_mmio);
1280 ahci_start_engine(port_mmio);
1281 }
1282 }
1283
1284 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
1285 {
1286 struct ahci_host_priv *hpriv = ap->host->private_data;
1287 struct ahci_port_priv *pp = ap->private_data;
1288 void __iomem *mmio = ap->host->mmio_base;
1289 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1290 const char *emsg = NULL;
1291 int rc;
1292
1293 rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
1294 if (rc) {
1295 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
1296 ahci_init_port(port_mmio, hpriv->cap,
1297 pp->cmd_slot_dma, pp->rx_fis_dma);
1298 }
1299
1300 return rc;
1301 }
1302
1303 static int ahci_port_resume(struct ata_port *ap)
1304 {
1305 struct ahci_port_priv *pp = ap->private_data;
1306 struct ahci_host_priv *hpriv = ap->host->private_data;
1307 void __iomem *mmio = ap->host->mmio_base;
1308 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1309
1310 ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);
1311
1312 return 0;
1313 }
1314
1315 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
1316 {
1317 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1318 void __iomem *mmio = host->mmio_base;
1319 u32 ctl;
1320
1321 if (mesg.event == PM_EVENT_SUSPEND) {
1322 /* AHCI spec rev1.1 section 8.3.3:
1323 * Software must disable interrupts prior to requesting a
1324 * transition of the HBA to D3 state.
1325 */
1326 ctl = readl(mmio + HOST_CTL);
1327 ctl &= ~HOST_IRQ_EN;
1328 writel(ctl, mmio + HOST_CTL);
1329 readl(mmio + HOST_CTL); /* flush */
1330 }
1331
1332 return ata_pci_device_suspend(pdev, mesg);
1333 }
1334
1335 static int ahci_pci_device_resume(struct pci_dev *pdev)
1336 {
1337 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1338 struct ahci_host_priv *hpriv = host->private_data;
1339 void __iomem *mmio = host->mmio_base;
1340 int rc;
1341
1342 ata_pci_device_do_resume(pdev);
1343
1344 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
1345 rc = ahci_reset_controller(mmio, pdev);
1346 if (rc)
1347 return rc;
1348
1349 ahci_init_controller(mmio, pdev, host->n_ports, hpriv->cap);
1350 }
1351
1352 ata_host_resume(host);
1353
1354 return 0;
1355 }
1356
1357 static int ahci_port_start(struct ata_port *ap)
1358 {
1359 struct device *dev = ap->host->dev;
1360 struct ahci_host_priv *hpriv = ap->host->private_data;
1361 struct ahci_port_priv *pp;
1362 void __iomem *mmio = ap->host->mmio_base;
1363 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1364 void *mem;
1365 dma_addr_t mem_dma;
1366 int rc;
1367
1368 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
1369 if (!pp)
1370 return -ENOMEM;
1371 memset(pp, 0, sizeof(*pp));
1372
1373 rc = ata_pad_alloc(ap, dev);
1374 if (rc) {
1375 kfree(pp);
1376 return rc;
1377 }
1378
1379 mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL);
1380 if (!mem) {
1381 ata_pad_free(ap, dev);
1382 kfree(pp);
1383 return -ENOMEM;
1384 }
1385 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
1386
1387 /*
1388 * First item in chunk of DMA memory: 32-slot command table,
1389 * 32 bytes each in size
1390 */
1391 pp->cmd_slot = mem;
1392 pp->cmd_slot_dma = mem_dma;
1393
1394 mem += AHCI_CMD_SLOT_SZ;
1395 mem_dma += AHCI_CMD_SLOT_SZ;
1396
1397 /*
1398 * Second item: Received-FIS area
1399 */
1400 pp->rx_fis = mem;
1401 pp->rx_fis_dma = mem_dma;
1402
1403 mem += AHCI_RX_FIS_SZ;
1404 mem_dma += AHCI_RX_FIS_SZ;
1405
1406 /*
1407 * Third item: data area for storing a single command
1408 * and its scatter-gather table
1409 */
1410 pp->cmd_tbl = mem;
1411 pp->cmd_tbl_dma = mem_dma;
1412
1413 ap->private_data = pp;
1414
1415 /* initialize port */
1416 ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);
1417
1418 return 0;
1419 }
1420
1421 static void ahci_port_stop(struct ata_port *ap)
1422 {
1423 struct device *dev = ap->host->dev;
1424 struct ahci_host_priv *hpriv = ap->host->private_data;
1425 struct ahci_port_priv *pp = ap->private_data;
1426 void __iomem *mmio = ap->host->mmio_base;
1427 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1428 const char *emsg = NULL;
1429 int rc;
1430
1431 /* de-initialize port */
1432 rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
1433 if (rc)
1434 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
1435
1436 ap->private_data = NULL;
1437 dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ,
1438 pp->cmd_slot, pp->cmd_slot_dma);
1439 ata_pad_free(ap, dev);
1440 kfree(pp);
1441 }
1442
1443 static void ahci_setup_port(struct ata_ioports *port, unsigned long base,
1444 unsigned int port_idx)
1445 {
1446 VPRINTK("ENTER, base==0x%lx, port_idx %u\n", base, port_idx);
1447 base = ahci_port_base_ul(base, port_idx);
1448 VPRINTK("base now==0x%lx\n", base);
1449
1450 port->cmd_addr = base;
1451 port->scr_addr = base + PORT_SCR;
1452
1453 VPRINTK("EXIT\n");
1454 }
1455
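/*
 * HOST_CAP_64 (CAP.S64A) advertises 64-bit addressing support, so the
 * DMA mask is set to 64 bits when possible and falls back to 32 bits
 * otherwise; the low five bits of HOST_CAP encode the number of ports
 * minus one.
 */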
1456 static int ahci_host_init(struct ata_probe_ent *probe_ent)
1457 {
1458 struct ahci_host_priv *hpriv = probe_ent->private_data;
1459 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1460 void __iomem *mmio = probe_ent->mmio_base;
1461 unsigned int i, using_dac;
1462 int rc;
1463
1464 rc = ahci_reset_controller(mmio, pdev);
1465 if (rc)
1466 return rc;
1467
1468 hpriv->cap = readl(mmio + HOST_CAP);
1469 hpriv->port_map = readl(mmio + HOST_PORTS_IMPL);
1470 probe_ent->n_ports = (hpriv->cap & 0x1f) + 1;
1471
1472 VPRINTK("cap 0x%x port_map 0x%x n_ports %d\n",
1473 hpriv->cap, hpriv->port_map, probe_ent->n_ports);
1474
1475 using_dac = hpriv->cap & HOST_CAP_64;
1476 if (using_dac &&
1477 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1478 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
1479 if (rc) {
1480 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1481 if (rc) {
1482 dev_printk(KERN_ERR, &pdev->dev,
1483 "64-bit DMA enable failed\n");
1484 return rc;
1485 }
1486 }
1487 } else {
1488 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1489 if (rc) {
1490 dev_printk(KERN_ERR, &pdev->dev,
1491 "32-bit DMA enable failed\n");
1492 return rc;
1493 }
1494 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1495 if (rc) {
1496 dev_printk(KERN_ERR, &pdev->dev,
1497 "32-bit consistent DMA enable failed\n");
1498 return rc;
1499 }
1500 }
1501
1502 for (i = 0; i < probe_ent->n_ports; i++)
1503 ahci_setup_port(&probe_ent->port[i], (unsigned long) mmio, i);
1504
1505 ahci_init_controller(mmio, pdev, probe_ent->n_ports, hpriv->cap);
1506
1507 pci_set_master(pdev);
1508
1509 return 0;
1510 }
1511
1512 static void ahci_print_info(struct ata_probe_ent *probe_ent)
1513 {
1514 struct ahci_host_priv *hpriv = probe_ent->private_data;
1515 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1516 void __iomem *mmio = probe_ent->mmio_base;
1517 u32 vers, cap, impl, speed;
1518 const char *speed_s;
1519 u16 cc;
1520 const char *scc_s;
1521
1522 vers = readl(mmio + HOST_VERSION);
1523 cap = hpriv->cap;
1524 impl = hpriv->port_map;
1525
1526 speed = (cap >> 20) & 0xf;
1527 if (speed == 1)
1528 speed_s = "1.5";
1529 else if (speed == 2)
1530 speed_s = "3";
1531 else
1532 speed_s = "?";
1533
1534 pci_read_config_word(pdev, 0x0a, &cc);
1535 if (cc == 0x0101)
1536 scc_s = "IDE";
1537 else if (cc == 0x0106)
1538 scc_s = "SATA";
1539 else if (cc == 0x0104)
1540 scc_s = "RAID";
1541 else
1542 scc_s = "unknown";
1543
1544 dev_printk(KERN_INFO, &pdev->dev,
1545 "AHCI %02x%02x.%02x%02x "
1546 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
1547 ,
1548
1549 (vers >> 24) & 0xff,
1550 (vers >> 16) & 0xff,
1551 (vers >> 8) & 0xff,
1552 vers & 0xff,
1553
1554 ((cap >> 8) & 0x1f) + 1,
1555 (cap & 0x1f) + 1,
1556 speed_s,
1557 impl,
1558 scc_s);
1559
1560 dev_printk(KERN_INFO, &pdev->dev,
1561 "flags: "
1562 "%s%s%s%s%s%s"
1563 "%s%s%s%s%s%s%s\n"
1564 ,
1565
1566 cap & (1 << 31) ? "64bit " : "",
1567 cap & (1 << 30) ? "ncq " : "",
1568 cap & (1 << 28) ? "ilck " : "",
1569 cap & (1 << 27) ? "stag " : "",
1570 cap & (1 << 26) ? "pm " : "",
1571 cap & (1 << 25) ? "led " : "",
1572
1573 cap & (1 << 24) ? "clo " : "",
1574 cap & (1 << 19) ? "nz " : "",
1575 cap & (1 << 18) ? "only " : "",
1576 cap & (1 << 17) ? "pmp " : "",
1577 cap & (1 << 15) ? "pio " : "",
1578 cap & (1 << 14) ? "slum " : "",
1579 cap & (1 << 13) ? "part " : ""
1580 );
1581 }
1582
1583 static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1584 {
1585 static int printed_version;
1586 struct ata_probe_ent *probe_ent = NULL;
1587 struct ahci_host_priv *hpriv;
1588 unsigned long base;
1589 void __iomem *mmio_base;
1590 unsigned int board_idx = (unsigned int) ent->driver_data;
1591 int have_msi, pci_dev_busy = 0;
1592 int rc;
1593
1594 VPRINTK("ENTER\n");
1595
1596 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
1597
1598 if (!printed_version++)
1599 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1600
1601 /* JMicron-specific fixup: make sure we're in AHCI mode */
1602 /* This is protected from races with ata_jmicron by the pci probe
1603 locking */
1604 if (pdev->vendor == PCI_VENDOR_ID_JMICRON) {
1605 /* AHCI enable, AHCI on function 0 */
1606 pci_write_config_byte(pdev, 0x41, 0xa1);
1607 /* Function 1 is the PATA controller */
1608 if (PCI_FUNC(pdev->devfn))
1609 return -ENODEV;
1610 }
1611
1612 rc = pci_enable_device(pdev);
1613 if (rc)
1614 return rc;
1615
1616 rc = pci_request_regions(pdev, DRV_NAME);
1617 if (rc) {
1618 pci_dev_busy = 1;
1619 goto err_out;
1620 }
1621
1622 if (pci_enable_msi(pdev) == 0)
1623 have_msi = 1;
1624 else {
1625 pci_intx(pdev, 1);
1626 have_msi = 0;
1627 }
1628
1629 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
1630 if (probe_ent == NULL) {
1631 rc = -ENOMEM;
1632 goto err_out_msi;
1633 }
1634
1635 memset(probe_ent, 0, sizeof(*probe_ent));
1636 probe_ent->dev = pci_dev_to_dev(pdev);
1637 INIT_LIST_HEAD(&probe_ent->node);
1638
1639 mmio_base = pci_iomap(pdev, AHCI_PCI_BAR, 0);
1640 if (mmio_base == NULL) {
1641 rc = -ENOMEM;
1642 goto err_out_free_ent;
1643 }
1644 base = (unsigned long) mmio_base;
1645
1646 hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
1647 if (!hpriv) {
1648 rc = -ENOMEM;
1649 goto err_out_iounmap;
1650 }
1651 memset(hpriv, 0, sizeof(*hpriv));
1652
1653 probe_ent->sht = ahci_port_info[board_idx].sht;
1654 probe_ent->port_flags = ahci_port_info[board_idx].flags;
1655 probe_ent->pio_mask = ahci_port_info[board_idx].pio_mask;
1656 probe_ent->udma_mask = ahci_port_info[board_idx].udma_mask;
1657 probe_ent->port_ops = ahci_port_info[board_idx].port_ops;
1658
1659 probe_ent->irq = pdev->irq;
1660 probe_ent->irq_flags = IRQF_SHARED;
1661 probe_ent->mmio_base = mmio_base;
1662 probe_ent->private_data = hpriv;
1663
1664 if (have_msi)
1665 hpriv->flags |= AHCI_FLAG_MSI;
1666
1667 /* initialize adapter */
1668 rc = ahci_host_init(probe_ent);
1669 if (rc)
1670 goto err_out_hpriv;
1671
1672 if (!(probe_ent->port_flags & AHCI_FLAG_NO_NCQ) &&
1673 (hpriv->cap & HOST_CAP_NCQ))
1674 probe_ent->port_flags |= ATA_FLAG_NCQ;
1675
1676 ahci_print_info(probe_ent);
1677
1678 /* FIXME: check ata_device_add return value */
1679 ata_device_add(probe_ent);
1680 kfree(probe_ent);
1681
1682 return 0;
1683
1684 err_out_hpriv:
1685 kfree(hpriv);
1686 err_out_iounmap:
1687 pci_iounmap(pdev, mmio_base);
1688 err_out_free_ent:
1689 kfree(probe_ent);
1690 err_out_msi:
1691 if (have_msi)
1692 pci_disable_msi(pdev);
1693 else
1694 pci_intx(pdev, 0);
1695 pci_release_regions(pdev);
1696 err_out:
1697 if (!pci_dev_busy)
1698 pci_disable_device(pdev);
1699 return rc;
1700 }
1701
1702 static void ahci_remove_one (struct pci_dev *pdev)
1703 {
1704 struct device *dev = pci_dev_to_dev(pdev);
1705 struct ata_host *host = dev_get_drvdata(dev);
1706 struct ahci_host_priv *hpriv = host->private_data;
1707 unsigned int i;
1708 int have_msi;
1709
1710 for (i = 0; i < host->n_ports; i++)
1711 ata_port_detach(host->ports[i]);
1712
1713 have_msi = hpriv->flags & AHCI_FLAG_MSI;
1714 free_irq(host->irq, host);
1715
1716 for (i = 0; i < host->n_ports; i++) {
1717 struct ata_port *ap = host->ports[i];
1718
1719 ata_scsi_release(ap->scsi_host);
1720 scsi_host_put(ap->scsi_host);
1721 }
1722
1723 kfree(hpriv);
1724 pci_iounmap(pdev, host->mmio_base);
1725 kfree(host);
1726
1727 if (have_msi)
1728 pci_disable_msi(pdev);
1729 else
1730 pci_intx(pdev, 0);
1731 pci_release_regions(pdev);
1732 pci_disable_device(pdev);
1733 dev_set_drvdata(dev, NULL);
1734 }
1735
1736 static int __init ahci_init(void)
1737 {
1738 return pci_register_driver(&ahci_pci_driver);
1739 }
1740
1741 static void __exit ahci_exit(void)
1742 {
1743 pci_unregister_driver(&ahci_pci_driver);
1744 }
1745
1746
1747 MODULE_AUTHOR("Jeff Garzik");
1748 MODULE_DESCRIPTION("AHCI SATA low-level driver");
1749 MODULE_LICENSE("GPL");
1750 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
1751 MODULE_VERSION(DRV_VERSION);
1752
1753 module_init(ahci_init);
1754 module_exit(ahci_exit);