Merge branch 'davem-next' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik...
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / ata / sata_nv.c
1 /*
2 * sata_nv.c - NVIDIA nForce SATA
3 *
4 * Copyright 2004 NVIDIA Corp. All rights reserved.
5 * Copyright 2004 Andrew Chew
6 *
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 * No hardware documentation available outside of NVIDIA.
27 * This driver programs the NVIDIA SATA controller in a similar
28 * fashion as with other PCI IDE BMDMA controllers, with a few
29 * NV-specific details such as register offsets, SATA phy location,
30 * hotplug info, etc.
31 *
32 * CK804/MCP04 controllers support an alternate programming interface
33 * similar to the ADMA specification (with some modifications).
34 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35 * sent through the legacy interface.
36 *
37 */
38
39 #include <linux/kernel.h>
40 #include <linux/module.h>
41 #include <linux/pci.h>
42 #include <linux/init.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/interrupt.h>
46 #include <linux/device.h>
47 #include <scsi/scsi_host.h>
48 #include <scsi/scsi_device.h>
49 #include <linux/libata.h>
50
#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.5"

/* ADMA engine can cross any 32-bit boundary; effectively no restriction */
#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
55
/* Register offsets, bit definitions and sizing constants for every
 * supported controller generation (nForce2/3, CK804/MCP04 ADMA, MCP55+
 * software NCQ). */
enum {
	NV_MMIO_BAR			= 5,

	NV_PORTS			= 2,
	NV_PIO_MASK			= 0x1f,
	NV_MWDMA_MASK			= 0x07,
	NV_UDMA_MASK			= 0x7f,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	/* CPB ring sizing: 5 APRDs live inside each 128-byte CPB, the rest
	 * spill into a per-CPB external APRD table */
	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space  */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags (nv_adma_port_priv.flags) */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

	/* MCP55 reg offset */
	NV_CTL_MCP55			= 0x400,
	NV_INT_STATUS_MCP55		= 0x440,
	NV_INT_ENABLE_MCP55		= 0x444,
	NV_NCQ_REG_MCP55		= 0x448,

	/* MCP55 */
	NV_INT_ALL_MCP55		= 0xffff,
	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,

	/* SWNCQ ENABLE BITS */
	NV_CTL_PRI_SWNCQ		= 0x02,
	NV_CTL_SEC_SWNCQ		= 0x04,

	/* SW NCQ status bits */
	NV_SWNCQ_IRQ_DEV		= (1 << 0),
	NV_SWNCQ_IRQ_PM			= (1 << 1),
	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),

	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),

	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
					  NV_SWNCQ_IRQ_REMOVED,

};
202
/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;		/* bus address of segment */
	__le32			len;		/* segment length in bytes */
	u8			flags;		/* NV_APRD_* bits */
	u8			packet_len;
	__le16			reserved;
};

/* Control bits OR'd into the 16-bit taskfile entries of a CPB (tf[]) */
enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};
221
/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0  - NV_CPB_RESP_* (written by hw) */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2  - NV_CPB_CTL_* */
	/* len is length of taskfile in 64 bit words */
	u8			len;		/* 3  */
	u8			tag;           /* 4  - command tag */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31, see nv_adma_regbits */
	struct nv_adma_prd	aprd[5];       /* 32-111, inline SG segments */
	__le64			next_aprd;     /* 112-119, bus addr of overflow SG table */
	__le64			reserved3;     /* 120-127 */
};
240
241
/* Per-port private data while operating in ADMA mode */
struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;		/* CPB ring, CPU view */
	dma_addr_t		cpb_dma;	/* CPB ring, bus address */
	struct nv_adma_prd	*aprd;		/* overflow SG tables, CPU view */
	dma_addr_t		aprd_dma;	/* overflow SG tables, bus address */
	void __iomem		*ctl_block;	/* this port's ADMA register block */
	void __iomem		*gen_block;	/* shared ADMA general registers */
	void __iomem		*notifier_clear_block;
	u64			adma_dma_mask;	/* DMA mask used while ADMA is active */
	u8			flags;		/* NV_ADMA_PORT_REGISTER_MODE etc. */
	int			last_issue_ncq;
};

/* Host-wide private data: which nv_host_type flavor this controller is */
struct nv_host_priv {
	unsigned long		type;
};

/* fifo circular queue to store commands deferred by SWNCQ */
struct defer_queue {
	u32		defer_bits;
	unsigned int	head;
	unsigned int	tail;
	unsigned int	tag[ATA_MAX_QUEUE];
};

/* which FISes have been observed during SWNCQ interrupt analysis */
enum ncq_saw_flag_list {
	ncq_saw_d2h	= (1U << 0),
	ncq_saw_dmas	= (1U << 1),
	ncq_saw_sdb	= (1U << 2),
	ncq_saw_backout	= (1U << 3),
};

/* Per-port private data for software-NCQ (MCP51/MCP55) operation */
struct nv_swncq_port_priv {
	struct ata_prd	*prd;	 /* our SG list */
	dma_addr_t	prd_dma; /* and its DMA mapping */
	void __iomem	*sactive_block;
	void __iomem	*irq_block;
	void __iomem	*tag_block;
	u32		qc_active;

	unsigned int	last_issue_tag;

	/* fifo circular queue to store deferral command */
	struct defer_queue defer_queue;

	/* for NCQ interrupt analysis */
	u32		dhfis_bits;
	u32		dmafis_bits;
	u32		sdbfis_bits;

	unsigned int	ncq_flags;
};


/* nonzero if the ADMA general-control word reports an interrupt for PORT */
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
296
297 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
298 #ifdef CONFIG_PM
299 static int nv_pci_device_resume(struct pci_dev *pdev);
300 #endif
301 static void nv_ck804_host_stop(struct ata_host *host);
302 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
303 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
304 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
305 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
306 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
307
308 static void nv_nf2_freeze(struct ata_port *ap);
309 static void nv_nf2_thaw(struct ata_port *ap);
310 static int nv_nf2_hardreset(struct ata_link *link, unsigned int *class,
311 unsigned long deadline);
312 static void nv_ck804_freeze(struct ata_port *ap);
313 static void nv_ck804_thaw(struct ata_port *ap);
314 static int nv_adma_slave_config(struct scsi_device *sdev);
315 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
316 static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
317 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
318 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
319 static void nv_adma_irq_clear(struct ata_port *ap);
320 static int nv_adma_port_start(struct ata_port *ap);
321 static void nv_adma_port_stop(struct ata_port *ap);
322 #ifdef CONFIG_PM
323 static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
324 static int nv_adma_port_resume(struct ata_port *ap);
325 #endif
326 static void nv_adma_freeze(struct ata_port *ap);
327 static void nv_adma_thaw(struct ata_port *ap);
328 static void nv_adma_error_handler(struct ata_port *ap);
329 static void nv_adma_host_stop(struct ata_host *host);
330 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
331 static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
332
333 static void nv_mcp55_thaw(struct ata_port *ap);
334 static void nv_mcp55_freeze(struct ata_port *ap);
335 static void nv_swncq_error_handler(struct ata_port *ap);
336 static int nv_swncq_slave_config(struct scsi_device *sdev);
337 static int nv_swncq_port_start(struct ata_port *ap);
338 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
339 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
340 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
341 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
342 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
343 #ifdef CONFIG_PM
344 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
345 static int nv_swncq_port_resume(struct ata_port *ap);
346 #endif
347
/* Controller flavors; used as index into nv_port_info[] and as driver_data
 * in the PCI device table. */
enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA,
	SWNCQ,
};
357
/* PCI IDs of supported controllers; driver_data selects the nv_host_type */
static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ }	/* terminate list */
};
376
static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};

/* plain BMDMA template for the non-NCQ flavors */
static struct scsi_host_template nv_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

/* ADMA template: queue depth and SG limits match the CPB layout */
static struct scsi_host_template nv_adma_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= NV_ADMA_MAX_CPBS,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
};

/* software-NCQ template: legacy BMDMA limits, NCQ queueing */
static struct scsi_host_template nv_swncq_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= ATA_MAX_QUEUE,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= nv_swncq_slave_config,
};
407
/* Base operations shared by every flavor: BMDMA plus SATA SCR access */
static struct ata_port_operations nv_common_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
};

/* OSDL bz11195 reports that link doesn't come online after hardreset
 * on generic nv's and there have been several other similar reports
 * on linux-ide.  Disable hardreset for generic nv's.
 */
static struct ata_port_operations nv_generic_ops = {
	.inherits		= &nv_common_ops,
	.hardreset		= ATA_OP_NULL,
};

/* OSDL bz3352 reports that nf2/3 controllers can't determine device
 * signature reliably.  Also, the following thread reports detection
 * failure on cold boot with the standard debouncing timing.
 *
 * http://thread.gmane.org/gmane.linux.ide/34098
 *
 * Debounce with hotplug timing and request follow-up SRST.
 */
static struct ata_port_operations nv_nf2_ops = {
	.inherits		= &nv_common_ops,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.hardreset		= nv_nf2_hardreset,
};

/* CK804 finally gets hardreset right */
static struct ata_port_operations nv_ck804_ops = {
	.inherits		= &nv_common_ops,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.host_stop		= nv_ck804_host_stop,
};

/* ADMA flavor: overrides the whole command path and EH on top of CK804 */
static struct ata_port_operations nv_adma_ops = {
	.inherits		= &nv_ck804_ops,

	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.sff_tf_read		= nv_adma_tf_read,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.sff_irq_clear		= nv_adma_irq_clear,

	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,

	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};

/* software-NCQ flavor (MCP51/MCP55) built on the generic operations */
static struct ata_port_operations nv_swncq_ops = {
	.inherits		= &nv_generic_ops,

	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_swncq_qc_prep,
	.qc_issue		= nv_swncq_qc_issue,

	.freeze			= nv_mcp55_freeze,
	.thaw			= nv_mcp55_thaw,
	.error_handler		= nv_swncq_error_handler,

#ifdef CONFIG_PM
	.port_suspend		= nv_swncq_port_suspend,
	.port_resume		= nv_swncq_port_resume,
#endif
	.port_start		= nv_swncq_port_start,
};

/* per-flavor probe data: interrupt handler plus SCSI host template */
struct nv_pi_priv {
	irq_handler_t			irq_handler;
	struct scsi_host_template	*sht;
};

#define NV_PI_PRIV(_irq_handler, _sht) \
	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
495
/* Port descriptions, indexed by enum nv_host_type */
static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* nforce2/3 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
	},
	/* ck804 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
	},
	/* ADMA */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
	},
	/* SWNCQ */
	{
		.flags	        = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_swncq_ops,
		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
	},
};
545
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* feature switches: ADMA disabled by default, software NCQ enabled;
 * presumably exposed as module parameters later in the file — verify */
static int adma_enabled;
static int swncq_enabled = 1;
554
555 static void nv_adma_register_mode(struct ata_port *ap)
556 {
557 struct nv_adma_port_priv *pp = ap->private_data;
558 void __iomem *mmio = pp->ctl_block;
559 u16 tmp, status;
560 int count = 0;
561
562 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
563 return;
564
565 status = readw(mmio + NV_ADMA_STAT);
566 while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
567 ndelay(50);
568 status = readw(mmio + NV_ADMA_STAT);
569 count++;
570 }
571 if (count == 20)
572 ata_port_printk(ap, KERN_WARNING,
573 "timeout waiting for ADMA IDLE, stat=0x%hx\n",
574 status);
575
576 tmp = readw(mmio + NV_ADMA_CTL);
577 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
578
579 count = 0;
580 status = readw(mmio + NV_ADMA_STAT);
581 while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
582 ndelay(50);
583 status = readw(mmio + NV_ADMA_STAT);
584 count++;
585 }
586 if (count == 20)
587 ata_port_printk(ap, KERN_WARNING,
588 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
589 status);
590
591 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
592 }
593
594 static void nv_adma_mode(struct ata_port *ap)
595 {
596 struct nv_adma_port_priv *pp = ap->private_data;
597 void __iomem *mmio = pp->ctl_block;
598 u16 tmp, status;
599 int count = 0;
600
601 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
602 return;
603
604 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
605
606 tmp = readw(mmio + NV_ADMA_CTL);
607 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
608
609 status = readw(mmio + NV_ADMA_STAT);
610 while (((status & NV_ADMA_STAT_LEGACY) ||
611 !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
612 ndelay(50);
613 status = readw(mmio + NV_ADMA_STAT);
614 count++;
615 }
616 if (count == 20)
617 ata_port_printk(ap, KERN_WARNING,
618 "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
619 status);
620
621 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
622 }
623
/* SCSI slave_configure hook for the ADMA flavor.  Chooses legacy-compatible
 * or ADMA DMA limits depending on whether the attached device is ATAPI,
 * flips the per-port ADMA enable bits in PCI config register 0x50, and
 * keeps the shared PCI DMA mask consistent across both ports. */
static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct nv_adma_port_priv *port0, *port1;
	struct scsi_device *sdev0, *sdev1;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	unsigned long segment_boundary, flags;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	spin_lock_irqsave(ap->lock, flags);

	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	/* each port has its own enable + PWB-enable bits in CFG_20 */
	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	port0 = ap->host->ports[0]->private_data;
	port1 = ap->host->ports[1]->private_data;
	sdev0 = ap->host->ports[0]->link.device[0].sdev;
	sdev1 = ap->host->ports[1]->link.device[0].sdev;
	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/** We have to set the DMA mask to 32-bit if either port is in
		    ATAPI mode, since they are on the same PCI device which is
		    used for DMA mapping. If we set the mask we also need to set
		    the bounce limit on both ports to ensure that the block
		    layer doesn't feed addresses that cause DMA mapping to
		    choke. If either SCSI device is not allocated yet, it's OK
		    since that port will discover its correct setting when it
		    does get allocated.
		    Note: Setting 32-bit mask should not fail. */
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       ATA_DMA_MASK);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       ATA_DMA_MASK);

		pci_set_dma_mask(pdev, ATA_DMA_MASK);
	} else {
		/** This shouldn't fail as it was set to this value before */
		pci_set_dma_mask(pdev, pp->adma_dma_mask);
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       pp->adma_dma_mask);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       pp->adma_dma_mask);
	}

	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)*ap->host->dev->dma_mask,
		segment_boundary, sg_tablesize);

	spin_unlock_irqrestore(ap->lock, flags);

	return rc;
}
733
734 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
735 {
736 struct nv_adma_port_priv *pp = qc->ap->private_data;
737 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
738 }
739
/* sff_tf_read hook: force register mode before touching the taskfile. */
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Other than when internal or pass-through commands are executed,
	   the only time this function will be called in ADMA mode will be
	   if a command fails. In the failure case we don't care about going
	   into register mode with ADMA commands pending, as the commands will
	   all shortly be aborted anyway. We assume that NCQ commands are not
	   issued via passthrough, which is the only way that switching into
	   ADMA mode could abort outstanding commands. */
	nv_adma_register_mode(ap);

	ata_sff_tf_read(ap, tf);
}
753
754 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
755 {
756 unsigned int idx = 0;
757
758 if (tf->flags & ATA_TFLAG_ISADDR) {
759 if (tf->flags & ATA_TFLAG_LBA48) {
760 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
761 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
762 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
763 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
764 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
765 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
766 } else
767 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);
768
769 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
770 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
771 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
772 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
773 }
774
775 if (tf->flags & ATA_TFLAG_DEVICE)
776 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
777
778 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
779
780 while (idx < 12)
781 cpb[idx++] = cpu_to_le16(IGN);
782
783 return idx;
784 }
785
/* Examine one CPB's response flags.  Completes the matching qc on success;
 * on error (or when force_err is set by a notifier-error bit) records an
 * EH description and aborts or freezes the port.  Returns nonzero if EH
 * was invoked and the caller should stop scanning further CPBs. */
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, "ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, "CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			/* controller-side CPB fault: needs a full freeze */
			ata_ehi_push_desc(ehi, "CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ata_ehi_push_desc(ehi, "unknown");
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return 1;
	}

	if (likely(flags & NV_CPB_RESP_DONE)) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		if (likely(qc)) {
			DPRINTK("Completing qc from tag %d\n", cpb_num);
			ata_qc_complete(qc);
		} else {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			/* Notifier bits set without a command may indicate the drive
			   is misbehaving. Raise host state machine violation on this
			   condition. */
			ata_port_printk(ap, KERN_ERR,
					"notifier for tag %d with no cmd?\n",
					cpb_num);
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			ata_port_freeze(ap);
			return 1;
		}
	}
	return 0;
}
848
849 static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
850 {
851 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
852
853 /* freeze if hotplugged */
854 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
855 ata_port_freeze(ap);
856 return 1;
857 }
858
859 /* bail out if not our interrupt */
860 if (!(irq_stat & NV_INT_DEV))
861 return 0;
862
863 /* DEV interrupt w/ no active qc? */
864 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
865 ata_sff_check_status(ap);
866 return 1;
867 }
868
869 /* handle interrupt */
870 return ata_sff_host_intr(ap, qc);
871 }
872
/* Top-level interrupt handler for the ADMA flavor.  For each port it either
 * delegates to the legacy handler (ATAPI setup / register mode) or reads
 * the notifier and status registers, acknowledges them, and walks the
 * signalled CPBs.  Notifier clears for both ports are written at the end,
 * as both registers must be written together. */
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u16 status;
			u32 gen_ctl;
			u32 notifier, notifier_error;

			/* if ADMA is disabled, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			/* if in ATA register mode, check for standard interrupts */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				if (ata_tag_valid(ap->link.active_tag))
					/** NV_INT_DEV indication seems unreliable at times
					    at least in ADMA mode. Force it on always when a
					    command is active, to prevent losing interrupts. */
					irq_stat |= NV_INT_DEV;
				handled += nv_host_intr(ap, irq_stat);
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

			if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			handled++; /* irq handled if we got here */

			/* freeze if hotplugged or controller error */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
					       NV_ADMA_STAT_HOTUNPLUG |
					       NV_ADMA_STAT_TIMEOUT |
					       NV_ADMA_STAT_SERROR))) {
				struct ata_eh_info *ehi = &ap->link.eh_info;

				ata_ehi_clear_desc(ehi);
				__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
				if (status & NV_ADMA_STAT_TIMEOUT) {
					ehi->err_mask |= AC_ERR_SYSTEM;
					ata_ehi_push_desc(ehi, "timeout");
				} else if (status & NV_ADMA_STAT_HOTPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, "hotplug");
				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, "hot unplug");
				} else if (status & NV_ADMA_STAT_SERROR) {
					/* let libata analyze SError and figure out the cause */
					ata_ehi_push_desc(ehi, "SError");
				} else
					ata_ehi_push_desc(ehi, "unknown");
				ata_port_freeze(ap);
				continue;
			}

			if (status & (NV_ADMA_STAT_DONE |
				      NV_ADMA_STAT_CPBERR |
				      NV_ADMA_STAT_CMD_COMPLETE)) {
				u32 check_commands = notifier_clears[i];
				int pos, error = 0;

				if (status & NV_ADMA_STAT_CPBERR) {
					/* Check all active commands */
					if (ata_tag_valid(ap->link.active_tag))
						check_commands = 1 <<
							ap->link.active_tag;
					else
						check_commands = ap->
							link.sactive;
				}

				/** Check CPBs for completed commands */
				while ((pos = ffs(check_commands)) && !error) {
					pos--;
					error = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
					check_commands &= ~(1 << pos);
				}
			}
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1001
/*
 * Freeze the port for error handling: perform the CK804-level freeze,
 * then additionally mask the ADMA engine's own interrupt sources.
 * Ports configured for ATAPI run in pure legacy mode and have no ADMA
 * interrupt state to touch.
 */
static void nv_adma_freeze(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_freeze(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* Disable interrupt: read-modify-write clears both the ADMA
	   interrupt enable and the hotplug interrupt enable bits */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}
1023
/*
 * Thaw the port after error handling: undo nv_adma_freeze() by
 * performing the CK804-level thaw and re-enabling the ADMA engine's
 * interrupt sources.  ATAPI-configured ports have no ADMA state.
 */
static void nv_adma_thaw(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_thaw(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* Enable interrupt: set both ADMA and hotplug interrupt enables */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}
1041
/*
 * Clear all pending interrupt conditions for this port.  In ATAPI
 * (legacy) mode only the SFF-level clear is needed; in ADMA mode the
 * CK804 status byte, the ADMA status word, and the notifier registers
 * must all be cleared.
 */
static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u32 notifier_clears[2];

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
		ata_sff_irq_clear(ap);
		return;
	}

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* clear ADMA status (write-1-to-clear all bits) */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* clear notifiers - note both ports need to be written with
	   something even though we are only clearing on one */
	if (ap->port_no == 0) {
		notifier_clears[0] = 0xFFFFFFFF;
		notifier_clears[1] = 0;
	} else {
		notifier_clears[0] = 0;
		notifier_clears[1] = 0xFFFFFFFF;
	}
	/* each port's private data holds its own notifier-clear address */
	pp = ap->host->ports[0]->private_data;
	writel(notifier_clears[0], pp->notifier_clear_block);
	pp = ap->host->ports[1]->private_data;
	writel(notifier_clears[1], pp->notifier_clear_block);
}
1074
1075 static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
1076 {
1077 struct nv_adma_port_priv *pp = qc->ap->private_data;
1078
1079 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
1080 ata_sff_post_internal_cmd(qc);
1081 }
1082
/*
 * Allocate and initialize per-port ADMA state.
 *
 * The DMA mask is deliberately toggled twice: legacy PRD/pad buffers
 * (allocated inside ata_port_start()) must live below 4GB, so the mask
 * is forced to 32-bit first; afterwards it is raised to 64-bit for the
 * CPB/APRD table.  The final effective mask is recorded in
 * pp->adma_dma_mask for later use as the bounce limit.
 *
 * Returns 0 on success or a negative errno.
 */
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 tmp;

	VPRINTK("ENTER\n");

	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
	   pad buffers */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* per-port ADMA control block within the shared MMIO BAR */
	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	/* Now that the legacy PRD and padding buffer are allocated we can
	   safely raise the DMA mask to allocate the CPB/APRD table.
	   These are allowed to fail since we store the value that ends up
	   being used to set as the bounce limit in slave_config later if
	   needed. */
	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	pp->adma_dma_mask = *dev->dma_mask;

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb = mem;
	pp->cpb_dma = mem_dma;

	/* program the CPB table base address (split into two 32-bit halves;
	   the double shift avoids UB on 32-bit dma_addr_t) */
	writel(mem_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	/* pulse CHANNEL_RESET to put the engine in a known state */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
1180
1181 static void nv_adma_port_stop(struct ata_port *ap)
1182 {
1183 struct nv_adma_port_priv *pp = ap->private_data;
1184 void __iomem *mmio = pp->ctl_block;
1185
1186 VPRINTK("ENTER\n");
1187 writew(0, mmio + NV_ADMA_CTL);
1188 }
1189
1190 #ifdef CONFIG_PM
/*
 * Suspend the port: drop out of ADMA mode, reset the CPB fetch count
 * and disable the engine entirely.  nv_adma_port_resume() restores
 * the state programmed at port_start.  Always returns 0.
 */
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}
1207
/*
 * Resume the port: reprogram the CPB table base and replay the same
 * initialization sequence as nv_adma_port_start() (clear status, clear
 * fetch count, re-enable interrupts, pulse channel reset).  Always
 * returns 0.
 */
static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location (two 32-bit halves; double shift avoids
	   UB when dma_addr_t is 32 bits wide) */
	writel(pp->cpb_dma & 0xFFFFFFFF, 	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	/* pulse CHANNEL_RESET to bring the engine to a known state */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
1241 #endif
1242
/*
 * Point the port's taskfile I/O addresses into the ADMA register
 * window.  In ADMA space the shadow taskfile registers are spaced
 * 4 bytes apart (hence the "* 4"), with the control/altstatus
 * register at fixed offset 0x20.
 */
static void nv_adma_setup_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &ap->ioaddr;

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}
1266
/*
 * Host-wide ADMA initialization: enable ADMA (and posted-write
 * buffering) for both ports via PCI config space, then remap each
 * port's taskfile addresses into ADMA register space.  Always
 * returns 0.
 */
static int nv_adma_host_init(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < host->n_ports; i++)
		nv_adma_setup_port(host->ports[i]);

	return 0;
}
1289
1290 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1291 struct scatterlist *sg,
1292 int idx,
1293 struct nv_adma_prd *aprd)
1294 {
1295 u8 flags = 0;
1296 if (qc->tf.flags & ATA_TFLAG_WRITE)
1297 flags |= NV_APRD_WRITE;
1298 if (idx == qc->n_elem - 1)
1299 flags |= NV_APRD_END;
1300 else if (idx != 4)
1301 flags |= NV_APRD_CONT;
1302
1303 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
1304 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1305 aprd->flags = flags;
1306 aprd->packet_len = 0;
1307 }
1308
/*
 * Build the ADMA PRD list for a command.  The first 5 entries live
 * inline in the CPB itself; any further entries go into this tag's
 * slot of the external APRD table, which the CPB then points at via
 * next_aprd.  After the loop, si equals the total element count, so
 * si > 5 means the external table was used.
 */
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;
	unsigned int si;

	VPRINTK("ENTER\n");

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		aprd = (si < 5) ? &cpb->aprd[si] :
			       &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
		nv_adma_fill_aprd(qc, sg, si, aprd);
	}
	if (si > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}
1328
1329 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1330 {
1331 struct nv_adma_port_priv *pp = qc->ap->private_data;
1332
1333 /* ADMA engine can only be used for non-ATAPI DMA commands,
1334 or interrupt-driven no-data commands. */
1335 if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1336 (qc->tf.flags & ATA_TFLAG_POLLING))
1337 return 1;
1338
1339 if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1340 (qc->tf.protocol == ATA_PROT_NODATA))
1341 return 0;
1342
1343 return 1;
1344 }
1345
/*
 * Prepare a command for issue.  Register-mode commands are handed to
 * the standard SFF prep; ADMA-mode commands get their CPB filled in.
 * The wmb()s enforce the order in which the controller may observe
 * the CPB fields: the CPB is marked not-valid before its contents are
 * rewritten, and only marked valid again once fully populated.
 */
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		/* non-ATAPI DMA commands must never take the legacy path */
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		ata_sff_qc_prep(qc);
		return;
	}

	/* invalidate the CPB before modifying it */
	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
	   until we are finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}
1391
/*
 * Issue a prepared command.  Register-mode commands are handed off to
 * the SFF issue path; ADMA commands are started by writing the tag to
 * the APPEND register.  Returns 0 on success or an AC_ERR_* mask.
 */
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	/* We can't handle result taskfile with NCQ commands, since
	   retrieving the taskfile switches us out of ADMA mode and would abort
	   existing commands. */
	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
		ata_dev_printk(qc->dev, KERN_ERR,
			"NCQ w/ RESULT_TF not allowed\n");
		return AC_ERR_SYSTEM;
	}

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		/* non-ATAPI DMA commands must never take the legacy path */
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		return ata_sff_qc_issue(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		   non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}
1437
/*
 * Shared interrupt handler for the generic (non-ADMA, non-SWNCQ)
 * flavors.  For each enabled port with an active, non-polled command,
 * dispatch to the SFF host interrupt handler; otherwise read the
 * status register to acknowledge any stray interrupt.
 */
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_sff_host_intr(ap, qc);
			else
				// No request pending?  Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->sff_check_status(ap);
		}

	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
1470
1471 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1472 {
1473 int i, handled = 0;
1474
1475 for (i = 0; i < host->n_ports; i++) {
1476 struct ata_port *ap = host->ports[i];
1477
1478 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1479 handled += nv_host_intr(ap, irq_stat);
1480
1481 irq_stat >>= NV_INT_PORT_SHIFT;
1482 }
1483
1484 return IRQ_RETVAL(handled);
1485 }
1486
1487 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1488 {
1489 struct ata_host *host = dev_instance;
1490 u8 irq_stat;
1491 irqreturn_t ret;
1492
1493 spin_lock(&host->lock);
1494 irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1495 ret = nv_do_interrupt(host, irq_stat);
1496 spin_unlock(&host->lock);
1497
1498 return ret;
1499 }
1500
1501 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1502 {
1503 struct ata_host *host = dev_instance;
1504 u8 irq_stat;
1505 irqreturn_t ret;
1506
1507 spin_lock(&host->lock);
1508 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1509 ret = nv_do_interrupt(host, irq_stat);
1510 spin_unlock(&host->lock);
1511
1512 return ret;
1513 }
1514
1515 static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1516 {
1517 if (sc_reg > SCR_CONTROL)
1518 return -EINVAL;
1519
1520 *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
1521 return 0;
1522 }
1523
1524 static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1525 {
1526 if (sc_reg > SCR_CONTROL)
1527 return -EINVAL;
1528
1529 iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
1530 return 0;
1531 }
1532
1533 static void nv_nf2_freeze(struct ata_port *ap)
1534 {
1535 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1536 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1537 u8 mask;
1538
1539 mask = ioread8(scr_addr + NV_INT_ENABLE);
1540 mask &= ~(NV_INT_ALL << shift);
1541 iowrite8(mask, scr_addr + NV_INT_ENABLE);
1542 }
1543
1544 static void nv_nf2_thaw(struct ata_port *ap)
1545 {
1546 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1547 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1548 u8 mask;
1549
1550 iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1551
1552 mask = ioread8(scr_addr + NV_INT_ENABLE);
1553 mask |= (NV_INT_MASK << shift);
1554 iowrite8(mask, scr_addr + NV_INT_ENABLE);
1555 }
1556
/*
 * Hardreset the link using hotplug debounce timings.  When a device
 * comes up online we return -EAGAIN instead of success.
 * NOTE(review): this appears to tell libata EH to follow up with a
 * softreset (hardreset alone presumably cannot classify the device on
 * this hardware) — confirm against libata EH conventions.
 */
static int nv_nf2_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline)
{
	bool online;
	int rc;

	rc = sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
				 &online, NULL);
	return online ? -EAGAIN : rc;
}
1567
1568 static void nv_ck804_freeze(struct ata_port *ap)
1569 {
1570 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1571 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1572 u8 mask;
1573
1574 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1575 mask &= ~(NV_INT_ALL << shift);
1576 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1577 }
1578
1579 static void nv_ck804_thaw(struct ata_port *ap)
1580 {
1581 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1582 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1583 u8 mask;
1584
1585 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1586
1587 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1588 mask |= (NV_INT_MASK << shift);
1589 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1590 }
1591
1592 static void nv_mcp55_freeze(struct ata_port *ap)
1593 {
1594 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1595 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1596 u32 mask;
1597
1598 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1599
1600 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1601 mask &= ~(NV_INT_ALL_MCP55 << shift);
1602 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1603 ata_sff_freeze(ap);
1604 }
1605
1606 static void nv_mcp55_thaw(struct ata_port *ap)
1607 {
1608 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1609 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1610 u32 mask;
1611
1612 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1613
1614 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1615 mask |= (NV_INT_MASK_MCP55 << shift);
1616 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1617 ata_sff_thaw(ap);
1618 }
1619
/*
 * ADMA error handler.  If the port is still in ADMA mode: dump engine
 * state for any in-flight commands, drop back to register mode,
 * invalidate every CPB so the engine cannot (re)execute them, clear
 * the fetch count, and pulse a channel reset.  Finally defer to the
 * standard SFF error handler.
 */
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
			/* snapshot engine registers for the error report */
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_printk(ap, KERN_ERR,
				"EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			/* report each CPB that is active or NCQ-outstanding */
			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
				    ap->link.sactive & (1 << i))
					ata_port_printk(ap, KERN_ERR,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from
		   being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_sff_error_handler(ap);
}
1675
1676 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1677 {
1678 struct nv_swncq_port_priv *pp = ap->private_data;
1679 struct defer_queue *dq = &pp->defer_queue;
1680
1681 /* queue is full */
1682 WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1683 dq->defer_bits |= (1 << qc->tag);
1684 dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1685 }
1686
1687 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1688 {
1689 struct nv_swncq_port_priv *pp = ap->private_data;
1690 struct defer_queue *dq = &pp->defer_queue;
1691 unsigned int tag;
1692
1693 if (dq->head == dq->tail) /* null queue */
1694 return NULL;
1695
1696 tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1697 dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1698 WARN_ON(!(dq->defer_bits & (1 << tag)));
1699 dq->defer_bits &= ~(1 << tag);
1700
1701 return ata_qc_from_tag(ap, tag);
1702 }
1703
1704 static void nv_swncq_fis_reinit(struct ata_port *ap)
1705 {
1706 struct nv_swncq_port_priv *pp = ap->private_data;
1707
1708 pp->dhfis_bits = 0;
1709 pp->dmafis_bits = 0;
1710 pp->sdbfis_bits = 0;
1711 pp->ncq_flags = 0;
1712 }
1713
1714 static void nv_swncq_pp_reinit(struct ata_port *ap)
1715 {
1716 struct nv_swncq_port_priv *pp = ap->private_data;
1717 struct defer_queue *dq = &pp->defer_queue;
1718
1719 dq->head = 0;
1720 dq->tail = 0;
1721 dq->defer_bits = 0;
1722 pp->qc_active = 0;
1723 pp->last_issue_tag = ATA_TAG_POISON;
1724 nv_swncq_fis_reinit(ap);
1725 }
1726
1727 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1728 {
1729 struct nv_swncq_port_priv *pp = ap->private_data;
1730
1731 writew(fis, pp->irq_block);
1732 }
1733
/*
 * Stop the BMDMA engine when no real queued command is at hand.
 * NOTE(review): relies on ata_bmdma_stop() touching only qc->ap —
 * the rest of the stack-allocated dummy qc is uninitialized; confirm
 * against the libata-sff implementation.
 */
static void __ata_bmdma_stop(struct ata_port *ap)
{
	struct ata_queued_cmd qc;

	qc.ap = ap;
	ata_bmdma_stop(&qc);
}
1741
1742 static void nv_swncq_ncq_stop(struct ata_port *ap)
1743 {
1744 struct nv_swncq_port_priv *pp = ap->private_data;
1745 unsigned int i;
1746 u32 sactive;
1747 u32 done_mask;
1748
1749 ata_port_printk(ap, KERN_ERR,
1750 "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
1751 ap->qc_active, ap->link.sactive);
1752 ata_port_printk(ap, KERN_ERR,
1753 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
1754 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1755 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1756 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1757
1758 ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
1759 ap->ops->sff_check_status(ap),
1760 ioread8(ap->ioaddr.error_addr));
1761
1762 sactive = readl(pp->sactive_block);
1763 done_mask = pp->qc_active ^ sactive;
1764
1765 ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sacitve\n");
1766 for (i = 0; i < ATA_MAX_QUEUE; i++) {
1767 u8 err = 0;
1768 if (pp->qc_active & (1 << i))
1769 err = 0;
1770 else if (done_mask & (1 << i))
1771 err = 1;
1772 else
1773 continue;
1774
1775 ata_port_printk(ap, KERN_ERR,
1776 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1777 (pp->dhfis_bits >> i) & 0x1,
1778 (pp->dmafis_bits >> i) & 0x1,
1779 (pp->sdbfis_bits >> i) & 0x1,
1780 (sactive >> i) & 0x1,
1781 (err ? "error! tag doesn't exit" : " "));
1782 }
1783
1784 nv_swncq_pp_reinit(ap);
1785 ap->ops->sff_irq_clear(ap);
1786 __ata_bmdma_stop(ap);
1787 nv_swncq_irq_clear(ap, 0xffff);
1788 }
1789
1790 static void nv_swncq_error_handler(struct ata_port *ap)
1791 {
1792 struct ata_eh_context *ehc = &ap->link.eh_context;
1793
1794 if (ap->link.sactive) {
1795 nv_swncq_ncq_stop(ap);
1796 ehc->i.action |= ATA_EH_RESET;
1797 }
1798
1799 ata_sff_error_handler(ap);
1800 }
1801
1802 #ifdef CONFIG_PM
/*
 * Suspend a SWNCQ port: acknowledge and mask all MCP55 interrupts,
 * then clear the SWNCQ enable bits for both channels in the control
 * register.  Always returns 0.
 */
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* disable irq */
	writel(0, mmio + NV_INT_ENABLE_MCP55);

	/* disable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
	writel(tmp, mmio + NV_CTL_MCP55);

	return 0;
}
1821
/*
 * Resume a SWNCQ port: acknowledge stale interrupts, re-enable the
 * interrupt sources (same 0x00fd00fd mask as nv_swncq_host_init) and
 * turn the SWNCQ enable bits back on.  Always returns 0.
 */
static int nv_swncq_port_resume(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* enable irq */
	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	return 0;
}
1839 #endif
1840
/*
 * Host-wide SWNCQ initialization: disable a chip erratum workaround
 * bit, enable SWNCQ for both channels, unmask the interrupt sources
 * and clear any stale per-port interrupt status.
 */
static void nv_swncq_host_init(struct ata_host *host)
{
	u32 tmp;
	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable  ECO 398 (bit 7 of undocumented config register 0x7f;
	   NOTE(review): semantics of this bit are not documented here) */
	pci_read_config_byte(pdev, 0x7f, &regval);
	regval &= ~(1 << 7);
	pci_write_config_byte(pdev, 0x7f, regval);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	VPRINTK("HOST_CTL:0x%X\n", tmp);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	/* enable irq intr */
	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* clear port irq */
	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}
1866
/*
 * slave_config hook: after the generic libata SCSI configuration,
 * work around broken NCQ on certain chip/drive combinations — MCP51,
 * and MCP55 up to revision A2, paired with Maxtor drives — by forcing
 * the queue depth down to 1 (effectively disabling SWNCQ).
 *
 * Returns the result of ata_scsi_slave_config().
 */
static int nv_swncq_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *dev;
	int rc;
	u8 rev;
	u8 check_maxtor = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	rc = ata_scsi_slave_config(sdev);
	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	dev = &ap->link.device[sdev->id];
	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
		return rc;

	/* if MCP51 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
		check_maxtor = 1;

	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
		pci_read_config_byte(pdev, 0x8, &rev);
		if (rev <= 0xa2)
			check_maxtor = 1;
	}

	if (!check_maxtor)
		return rc;

	/* match on the drive model string from the IDENTIFY data */
	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	if (strncmp(model_num, "Maxtor", 6) == 0) {
		ata_scsi_change_queue_depth(sdev, 1);
		ata_dev_printk(dev, KERN_NOTICE,
			"Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
	}

	return rc;
}
1912
/*
 * Allocate per-port SWNCQ state: the standard libata port resources,
 * the private data block, and one PRD table per possible tag.  Also
 * resolve the per-port SActive / interrupt-status / tag register
 * addresses.  Returns 0 or a negative errno.
 */
static int nv_swncq_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct nv_swncq_port_priv *pp;
	int rc;

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* one PRD table per tag, contiguous in a single DMA allocation */
	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
				      &pp->prd_dma, GFP_KERNEL);
	if (!pp->prd)
		return -ENOMEM;
	memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);

	ap->private_data = pp;
	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;

	return 0;
}
1941
1942 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1943 {
1944 if (qc->tf.protocol != ATA_PROT_NCQ) {
1945 ata_sff_qc_prep(qc);
1946 return;
1947 }
1948
1949 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1950 return;
1951
1952 nv_swncq_fill_sg(qc);
1953 }
1954
/*
 * Build the BMDMA PRD table for @qc in this tag's slot of the
 * per-port table.  Each scatter/gather element is split so that no
 * PRD entry crosses a 64KiB boundary; the length is stored masked to
 * 16 bits (NOTE(review): a full 0x10000 segment is thus written as 0,
 * which BMDMA hardware conventionally interprets as 64KiB — confirm).
 * The final entry is marked with ATA_PRD_EOT.
 */
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_prd *prd;
	unsigned int si, idx;

	prd = pp->prd + ATA_MAX_PRD * qc->tag;

	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		addr = (u32)sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			/* clamp this entry so it ends at the next 64K boundary */
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[idx].addr = cpu_to_le32(addr);
			prd[idx].flags_len = cpu_to_le32(len & 0xffff);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
1990
/*
 * Issue one NCQ command to the hardware: set its bit in the SActive
 * shadow register, update the per-tag tracking state, then load the
 * taskfile and issue the command through the SFF ops.  A NULL @qc is
 * tolerated (no-op).  Always returns 0.
 */
static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
					  struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	if (qc == NULL)
		return 0;

	DPRINTK("Enter\n");

	writel((1 << qc->tag), pp->sactive_block);
	pp->last_issue_tag = qc->tag;
	/* a fresh issue has seen neither a D2H FIS nor a DMA-setup FIS yet */
	pp->dhfis_bits &= ~(1 << qc->tag);
	pp->dmafis_bits &= ~(1 << qc->tag);
	pp->qc_active |= (0x1 << qc->tag);

	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
	ap->ops->sff_exec_command(ap, &qc->tf);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}
2014
2015 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2016 {
2017 struct ata_port *ap = qc->ap;
2018 struct nv_swncq_port_priv *pp = ap->private_data;
2019
2020 if (qc->tf.protocol != ATA_PROT_NCQ)
2021 return ata_sff_qc_issue(qc);
2022
2023 DPRINTK("Enter\n");
2024
2025 if (!pp->qc_active)
2026 nv_swncq_issue_atacmd(ap, qc);
2027 else
2028 nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */
2029
2030 return 0;
2031 }
2032
/*
 * Handle a hotplug/unplug notification from the FIS interrupt status:
 * clear SError (read-then-write-back acknowledges it), record a
 * plug/unplug description, mark the EH info as hotplugged, and freeze
 * the port so error handling takes over.
 */
static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
{
	u32 serror;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	sata_scr_read(&ap->link, SCR_ERROR, &serror);
	sata_scr_write(&ap->link, SCR_ERROR, serror);

	/* analyze @irq_stat */
	if (fis & NV_SWNCQ_IRQ_ADDED)
		ata_ehi_push_desc(ehi, "hot plug");
	else if (fis & NV_SWNCQ_IRQ_REMOVED)
		ata_ehi_push_desc(ehi, "hot unplug");

	ata_ehi_hotplugged(ehi);

	/* okay, let's hand over to EH */
	ehi->serror |= serror;

	ata_port_freeze(ap);
}
2057
/* Handle a Set Device Bits FIS: complete finished NCQ commands, detect
 * protocol violations, and kick off re-issue / deferred commands.
 * Returns the number of commands completed, or a negative errno when the
 * port must be handed to EH.
 */
static int nv_swncq_sdbfis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 sactive;
	int nr_done = 0;	/* commands completed in this invocation */
	u32 done_mask;
	int i;
	u8 host_stat;
	u8 lack_dhfis = 0;	/* set when a D2H register FIS went missing */

	/* A BMDMA error means the data transfer itself failed; punt to EH. */
	host_stat = ap->ops->bmdma_status(ap);
	if (unlikely(host_stat & ATA_DMA_ERR)) {
		/* error when transferring data to/from memory */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
		ehi->err_mask |= AC_ERR_HOST_BUS;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}

	ap->ops->sff_irq_clear(ap);
	__ata_bmdma_stop(ap);

	/* Tags finished by the device are set in our qc_active shadow but
	 * no longer set in the hardware SActive register.
	 */
	sactive = readl(pp->sactive_block);
	done_mask = pp->qc_active ^ sactive;

	/* A bit set in SActive that we never issued is a protocol
	 * violation -- only EH can recover from that.
	 */
	if (unlikely(done_mask & sactive)) {
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition"
				  "(%08x->%08x)", pp->qc_active, sactive);
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}
	/* Complete each finished tag and clear its tracking bits. */
	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		if (!(done_mask & (1 << i)))
			continue;

		qc = ata_qc_from_tag(ap, i);
		if (qc) {
			ata_qc_complete(qc);
			pp->qc_active &= ~(1 << i);
			pp->dhfis_bits &= ~(1 << i);
			pp->dmafis_bits &= ~(1 << i);
			pp->sdbfis_bits |= (1 << i);
			nr_done++;
		}
	}

	/* Nothing left in flight anywhere: reset per-port SWNCQ state. */
	if (!ap->qc_active) {
		DPRINTK("over\n");
		nv_swncq_pp_reinit(ap);
		return nr_done;
	}

	/* An in-flight command is still waiting for its D2H register FIS;
	 * the device will interrupt again, so there is nothing to do yet.
	 */
	if (pp->qc_active & pp->dhfis_bits)
		return nr_done;

	if ((pp->ncq_flags & ncq_saw_backout) ||
	    (pp->qc_active ^ pp->dhfis_bits))
		/* if the controller can't get a device to host register FIS,
		 * the driver needs to reissue the new command.
		 */
		lack_dhfis = 1;

	DPRINTK("id 0x%x QC: qc_active 0x%x,"
		"SWNCQ:qc_active 0x%X defer_bits %X "
		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
		ap->print_id, ap->qc_active, pp->qc_active,
		pp->defer_queue.defer_bits, pp->dhfis_bits,
		pp->dmafis_bits, pp->last_issue_tag);

	nv_swncq_fis_reinit(ap);

	/* Re-issue the most recent command whose D2H FIS never arrived. */
	if (lack_dhfis) {
		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
		nv_swncq_issue_atacmd(ap, qc);
		return nr_done;
	}

	if (pp->defer_queue.defer_bits) {
		/* send deferral queue command */
		qc = nv_swncq_qc_from_dq(ap);
		WARN_ON(qc == NULL);
		nv_swncq_issue_atacmd(ap, qc);
	}

	return nr_done;
}
2149
2150 static inline u32 nv_swncq_tag(struct ata_port *ap)
2151 {
2152 struct nv_swncq_port_priv *pp = ap->private_data;
2153 u32 tag;
2154
2155 tag = readb(pp->tag_block) >> 2;
2156 return (tag & 0x1f);
2157 }
2158
2159 static int nv_swncq_dmafis(struct ata_port *ap)
2160 {
2161 struct ata_queued_cmd *qc;
2162 unsigned int rw;
2163 u8 dmactl;
2164 u32 tag;
2165 struct nv_swncq_port_priv *pp = ap->private_data;
2166
2167 __ata_bmdma_stop(ap);
2168 tag = nv_swncq_tag(ap);
2169
2170 DPRINTK("dma setup tag 0x%x\n", tag);
2171 qc = ata_qc_from_tag(ap, tag);
2172
2173 if (unlikely(!qc))
2174 return 0;
2175
2176 rw = qc->tf.flags & ATA_TFLAG_WRITE;
2177
2178 /* load PRD table addr. */
2179 iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2180 ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2181
2182 /* specify data direction, triple-check start bit is clear */
2183 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2184 dmactl &= ~ATA_DMA_WR;
2185 if (!rw)
2186 dmactl |= ATA_DMA_WR;
2187
2188 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2189
2190 return 1;
2191 }
2192
/* Per-port SW-NCQ interrupt service: decode the masked IRQ status word
 * @fis, handle hotplug / errors / SDB / D2H / DMA-setup events, and
 * freeze the port on any protocol error.
 */
static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 serror;
	u8 ata_stat;
	int rc = 0;

	/* Reading the status register also acks the device interrupt. */
	ata_stat = ap->ops->sff_check_status(ap);
	nv_swncq_irq_clear(ap, fis);
	if (!fis)
		return;

	/* Port frozen: EH owns it, ignore everything. */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
		nv_swncq_hotplug(ap, fis);
		return;
	}

	/* No NCQ command in flight -- nothing for this handler to do. */
	if (!pp->qc_active)
		return;

	/* Clear SError (read-then-write-back); bail if unreadable. */
	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
		return;
	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);

	/* Device reported an error: record state and hand over to EH. */
	if (ata_stat & ATA_ERR) {
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
		ehi->err_mask |= AC_ERR_DEV;
		ehi->serror |= serror;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
		return;
	}

	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
		/* If the IRQ is backout, driver must issue
		 * the new command again some time later.
		 */
		pp->ncq_flags |= ncq_saw_backout;
	}

	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
		pp->ncq_flags |= ncq_saw_sdb;
		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
			ap->print_id, pp->qc_active, pp->dhfis_bits,
			pp->dmafis_bits, readl(pp->sactive_block));
		/* Complete finished commands; negative rc means EH time. */
		rc = nv_swncq_sdbfis(ap);
		if (rc < 0)
			goto irq_error;
	}

	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
		/* The interrupt indicates the new command
		 * was transmitted correctly to the drive.
		 */
		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
		pp->ncq_flags |= ncq_saw_d2h;
		/* A D2H register FIS after SDB/backout in the same cycle
		 * violates the expected FIS ordering.
		 */
		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
			ata_ehi_push_desc(ehi, "illegal fis transaction");
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			goto irq_error;
		}

		/* No DMA setup yet in this cycle: if the drive is idle,
		 * opportunistically issue the next deferred command.
		 */
		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
		    !(pp->ncq_flags & ncq_saw_dmas)) {
			ata_stat = ap->ops->sff_check_status(ap);
			if (ata_stat & ATA_BUSY)
				goto irq_exit;

			if (pp->defer_queue.defer_bits) {
				DPRINTK("send next command\n");
				qc = nv_swncq_qc_from_dq(ap);
				nv_swncq_issue_atacmd(ap, qc);
			}
		}
	}

	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
		/* program the dma controller with appropriate PRD buffers
		 * and start the DMA transfer for requested command.
		 */
		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
		pp->ncq_flags |= ncq_saw_dmas;
		rc = nv_swncq_dmafis(ap);
	}

irq_exit:
	return;
irq_error:
	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
	ata_port_freeze(ap);
	return;
}
2293
/* Shared IRQ handler for MCP55 SW-NCQ controllers: reads the combined
 * interrupt status and dispatches a per-port slice to each active port.
 */
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;
	u32 irq_stat;

	spin_lock_irqsave(&host->lock, flags);

	/* One status register carries the bits for all ports. */
	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			/* sactive set means NCQ commands are in flight,
			 * so take the SW-NCQ path; otherwise fall back to
			 * the generic legacy handler.
			 */
			if (ap->link.sactive) {
				nv_swncq_host_interrupt(ap, (u16)irq_stat);
				handled = 1;
			} else {
				if (irq_stat)	/* reserve Hotplug */
					nv_swncq_irq_clear(ap, 0xfff0);

				handled += nv_host_intr(ap, (u8)irq_stat);
			}
		}
		/* Shift to the next port's slice of the status word. */
		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
2327
/* PCI probe: identify the controller flavor (GENERIC/CK804/ADMA/SWNCQ),
 * set up BAR mappings, SCR access and mode-specific state, then activate
 * the host.  Returns 0 on success or a negative errno.
 */
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	const struct ata_port_info *ppi[] = { NULL, NULL };
	struct nv_pi_priv *ipriv;
	struct ata_host *host;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;

	// Make sure this is a SATA controller by counting the number of bars
	// (NVIDIA SATA controllers will always have six bars).  Otherwise,
	// it's an IDE controller and we ignore it.
	for (bar = 0; bar < 6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	/* Print the driver version once, on first probe. */
	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* determine type and allocate host */
	if (type == CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
	}

	/* SWNCQ can be disabled via module parameter; fall back to the
	 * generic interface in that case.
	 */
	if (type == SWNCQ) {
		if (swncq_enabled)
			dev_printk(KERN_NOTICE, &pdev->dev,
				   "Using SWNCQ mode\n");
		else
			type = GENERIC;
	}

	ppi[0] = &nv_port_info[type];
	ipriv = ppi[0]->private_data;
	rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;

	/* devm allocation: freed automatically on detach/failure. */
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->type = type;
	host->private_data = hpriv;

	/* request and iomap NV_MMIO_BAR */
	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;

	/* configure SCR access */
	base = host->iomap[NV_MMIO_BAR];
	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	/* init ADMA */
	if (type == ADMA) {
		rc = nv_adma_host_init(host);
		if (rc)
			return rc;
	} else if (type == SWNCQ)
		nv_swncq_host_init(host);

	pci_set_master(pdev);
	/* Register the IRQ handler chosen for this controller type. */
	return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
				 IRQF_SHARED, ipriv->sht);
}
2411
#ifdef CONFIG_PM
/* PCI resume: restore the generic PCI state, then re-apply the
 * controller-specific config-space setup (SATA space enable, per-port
 * ADMA enables) that is lost across a suspend, and resume the host.
 */
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	/* Only a real suspend clobbers config space; skip otherwise. */
	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if (hpriv->type >= CK804) {
			u8 regval;

			/* Re-enable the SATA register space. */
			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if (hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;
			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			/* Port 0: ADMA off when ATAPI setup completed. */
			pp = host->ports[0]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
					  NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			/* Port 1: same rule as port 0. */
			pp = host->ports[1]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
					  NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}
#endif
2461
2462 static void nv_ck804_host_stop(struct ata_host *host)
2463 {
2464 struct pci_dev *pdev = to_pci_dev(host->dev);
2465 u8 regval;
2466
2467 /* disable SATA space for CK804 */
2468 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2469 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2470 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2471 }
2472
2473 static void nv_adma_host_stop(struct ata_host *host)
2474 {
2475 struct pci_dev *pdev = to_pci_dev(host->dev);
2476 u32 tmp32;
2477
2478 /* disable ADMA on the ports */
2479 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2480 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2481 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2482 NV_MCP_SATA_CFG_20_PORT1_EN |
2483 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2484
2485 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2486
2487 nv_ck804_host_stop(host);
2488 }
2489
/* Module entry point: register the PCI driver with the PCI core. */
static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}
2494
/* Module exit point: unregister the PCI driver. */
static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}
2499
module_init(nv_init);
module_exit(nv_exit);
/* Both parameters are read-only after module load (perms 0444). */
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
module_param_named(swncq, swncq_enabled, bool, 0444);
MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");