Merge branch 'davem-next' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik...
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / ata / sata_nv.c
CommitLineData
1da177e4
LT
1/*
2 * sata_nv.c - NVIDIA nForce SATA
3 *
4 * Copyright 2004 NVIDIA Corp. All rights reserved.
5 * Copyright 2004 Andrew Chew
6 *
aa7e16d6
JG
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
1da177e4 21 *
af36d7f0
JG
22 *
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
25 *
26 * No hardware documentation available outside of NVIDIA.
27 * This driver programs the NVIDIA SATA controller in a similar
28 * fashion as with other PCI IDE BMDMA controllers, with a few
29 * NV-specific details such as register offsets, SATA phy location,
30 * hotplug info, etc.
31 *
fbbb262d
RH
32 * CK804/MCP04 controllers support an alternate programming interface
33 * similar to the ADMA specification (with some modifications).
34 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35 * sent through the legacy interface.
36 *
1da177e4
LT
37 */
38
1da177e4
LT
39#include <linux/kernel.h>
40#include <linux/module.h>
41#include <linux/pci.h>
42#include <linux/init.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/interrupt.h>
a9524a76 46#include <linux/device.h>
1da177e4 47#include <scsi/scsi_host.h>
fbbb262d 48#include <scsi/scsi_device.h>
1da177e4
LT
49#include <linux/libata.h>
50
/* Driver identity and the ADMA engine's 32-bit DMA segment boundary. */
#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.5"

#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
1da177e4 55
10ad05df 56enum {
0d5ff566
TH
57 NV_MMIO_BAR = 5,
58
10ad05df
JG
59 NV_PORTS = 2,
60 NV_PIO_MASK = 0x1f,
61 NV_MWDMA_MASK = 0x07,
62 NV_UDMA_MASK = 0x7f,
63 NV_PORT0_SCR_REG_OFFSET = 0x00,
64 NV_PORT1_SCR_REG_OFFSET = 0x40,
1da177e4 65
27e4b274 66 /* INT_STATUS/ENABLE */
10ad05df 67 NV_INT_STATUS = 0x10,
10ad05df 68 NV_INT_ENABLE = 0x11,
27e4b274 69 NV_INT_STATUS_CK804 = 0x440,
10ad05df 70 NV_INT_ENABLE_CK804 = 0x441,
1da177e4 71
27e4b274
TH
72 /* INT_STATUS/ENABLE bits */
73 NV_INT_DEV = 0x01,
74 NV_INT_PM = 0x02,
75 NV_INT_ADDED = 0x04,
76 NV_INT_REMOVED = 0x08,
77
78 NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
79
39f87582 80 NV_INT_ALL = 0x0f,
5a44efff
TH
81 NV_INT_MASK = NV_INT_DEV |
82 NV_INT_ADDED | NV_INT_REMOVED,
39f87582 83
27e4b274 84 /* INT_CONFIG */
10ad05df
JG
85 NV_INT_CONFIG = 0x12,
86 NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI
1da177e4 87
10ad05df
JG
88 // For PCI config register 20
89 NV_MCP_SATA_CFG_20 = 0x50,
90 NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
fbbb262d
RH
91 NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
92 NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
93 NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
94 NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),
95
96 NV_ADMA_MAX_CPBS = 32,
97 NV_ADMA_CPB_SZ = 128,
98 NV_ADMA_APRD_SZ = 16,
99 NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
100 NV_ADMA_APRD_SZ,
101 NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
102 NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
103 NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
104 (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
105
106 /* BAR5 offset to ADMA general registers */
107 NV_ADMA_GEN = 0x400,
108 NV_ADMA_GEN_CTL = 0x00,
109 NV_ADMA_NOTIFIER_CLEAR = 0x30,
110
111 /* BAR5 offset to ADMA ports */
112 NV_ADMA_PORT = 0x480,
113
114 /* size of ADMA port register space */
115 NV_ADMA_PORT_SIZE = 0x100,
116
117 /* ADMA port registers */
118 NV_ADMA_CTL = 0x40,
119 NV_ADMA_CPB_COUNT = 0x42,
120 NV_ADMA_NEXT_CPB_IDX = 0x43,
121 NV_ADMA_STAT = 0x44,
122 NV_ADMA_CPB_BASE_LOW = 0x48,
123 NV_ADMA_CPB_BASE_HIGH = 0x4C,
124 NV_ADMA_APPEND = 0x50,
125 NV_ADMA_NOTIFIER = 0x68,
126 NV_ADMA_NOTIFIER_ERROR = 0x6C,
127
128 /* NV_ADMA_CTL register bits */
129 NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
130 NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
131 NV_ADMA_CTL_GO = (1 << 7),
132 NV_ADMA_CTL_AIEN = (1 << 8),
133 NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
134 NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),
135
136 /* CPB response flag bits */
137 NV_CPB_RESP_DONE = (1 << 0),
138 NV_CPB_RESP_ATA_ERR = (1 << 3),
139 NV_CPB_RESP_CMD_ERR = (1 << 4),
140 NV_CPB_RESP_CPB_ERR = (1 << 7),
141
142 /* CPB control flag bits */
143 NV_CPB_CTL_CPB_VALID = (1 << 0),
144 NV_CPB_CTL_QUEUE = (1 << 1),
145 NV_CPB_CTL_APRD_VALID = (1 << 2),
146 NV_CPB_CTL_IEN = (1 << 3),
147 NV_CPB_CTL_FPDMA = (1 << 4),
148
149 /* APRD flags */
150 NV_APRD_WRITE = (1 << 1),
151 NV_APRD_END = (1 << 2),
152 NV_APRD_CONT = (1 << 3),
153
154 /* NV_ADMA_STAT flags */
155 NV_ADMA_STAT_TIMEOUT = (1 << 0),
156 NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
157 NV_ADMA_STAT_HOTPLUG = (1 << 2),
158 NV_ADMA_STAT_CPBERR = (1 << 4),
159 NV_ADMA_STAT_SERROR = (1 << 5),
160 NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
161 NV_ADMA_STAT_IDLE = (1 << 8),
162 NV_ADMA_STAT_LEGACY = (1 << 9),
163 NV_ADMA_STAT_STOPPED = (1 << 10),
164 NV_ADMA_STAT_DONE = (1 << 12),
165 NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
2dcb407e 166 NV_ADMA_STAT_TIMEOUT,
fbbb262d
RH
167
168 /* port flags */
169 NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
2dec7555 170 NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
fbbb262d 171
f140f0f1
KL
172 /* MCP55 reg offset */
173 NV_CTL_MCP55 = 0x400,
174 NV_INT_STATUS_MCP55 = 0x440,
175 NV_INT_ENABLE_MCP55 = 0x444,
176 NV_NCQ_REG_MCP55 = 0x448,
177
178 /* MCP55 */
179 NV_INT_ALL_MCP55 = 0xffff,
180 NV_INT_PORT_SHIFT_MCP55 = 16, /* each port occupies 16 bits */
181 NV_INT_MASK_MCP55 = NV_INT_ALL_MCP55 & 0xfffd,
182
183 /* SWNCQ ENABLE BITS*/
184 NV_CTL_PRI_SWNCQ = 0x02,
185 NV_CTL_SEC_SWNCQ = 0x04,
186
187 /* SW NCQ status bits*/
188 NV_SWNCQ_IRQ_DEV = (1 << 0),
189 NV_SWNCQ_IRQ_PM = (1 << 1),
190 NV_SWNCQ_IRQ_ADDED = (1 << 2),
191 NV_SWNCQ_IRQ_REMOVED = (1 << 3),
192
193 NV_SWNCQ_IRQ_BACKOUT = (1 << 4),
194 NV_SWNCQ_IRQ_SDBFIS = (1 << 5),
195 NV_SWNCQ_IRQ_DHREGFIS = (1 << 6),
196 NV_SWNCQ_IRQ_DMASETUP = (1 << 7),
197
198 NV_SWNCQ_IRQ_HOTPLUG = NV_SWNCQ_IRQ_ADDED |
199 NV_SWNCQ_IRQ_REMOVED,
200
fbbb262d
RH
201};
202
/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;		/* bus address of the segment */
	__le32			len;		/* segment length in bytes */
	u8			flags;		/* NV_APRD_* flags */
	u8			packet_len;
	__le16			reserved;
};
211
/* Modifier bits OR'd into the 16-bit taskfile entries of a CPB. */
enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};
221
/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 - NV_CPB_RESP_* status */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 - NV_CPB_CTL_* control */
	/* len is length of taskfile in 64 bit words */
	u8			len;		/* 3  */
	u8			tag;           /* 4 - command tag */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 - encoded taskfile */
	struct nv_adma_prd	aprd[5];       /* 32-111 - inline SG segments */
	__le64			next_aprd;     /* 112-119 - overflow SG table */
	__le64			reserved3;     /* 120-127 */
};
1da177e4 240
fbbb262d
RH
241
/* Per-port private state for the ADMA interface. */
struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;		/* CPB ring (coherent DMA) */
	dma_addr_t		cpb_dma;	/* its bus address */
	struct nv_adma_prd	*aprd;		/* overflow SG tables */
	dma_addr_t		aprd_dma;
	void __iomem		*ctl_block;	/* per-port ADMA registers */
	void __iomem		*gen_block;	/* ADMA general registers */
	void __iomem		*notifier_clear_block;
	u64			adma_dma_mask;	/* DMA mask while in ADMA mode */
	u8			flags;		/* NV_ADMA_PORT_* flags */
	int			last_issue_ncq;
};
254
cdf56bcf
RH
/* Per-host private data: records which nv_host_type this controller is. */
struct nv_host_priv {
	unsigned long		type;
};
258
f140f0f1
KL
/* FIFO of deferred command tags used by the SWNCQ path. */
struct defer_queue {
	u32		defer_bits;	/* bitmap of queued tags */
	unsigned int	head;
	unsigned int	tail;
	unsigned int	tag[ATA_MAX_QUEUE];
};
265
/* Which FIS types have been observed during NCQ interrupt analysis. */
enum ncq_saw_flag_list {
	ncq_saw_d2h	= (1U << 0),
	ncq_saw_dmas	= (1U << 1),
	ncq_saw_sdb	= (1U << 2),
	ncq_saw_backout	= (1U << 3),
};
272
/* Per-port private state for the software-NCQ (MCP51/55) interface. */
struct nv_swncq_port_priv {
	struct ata_prd	*prd;	 /* our SG list */
	dma_addr_t	prd_dma; /* and its DMA mapping */
	void __iomem	*sactive_block;
	void __iomem	*irq_block;
	void __iomem	*tag_block;
	u32		qc_active;

	unsigned int	last_issue_tag;

	/* fifo circular queue to store deferral command */
	struct defer_queue defer_queue;

	/* for NCQ interrupt analysis */
	u32		dhfis_bits;
	u32		dmafis_bits;
	u32		sdbfis_bits;

	unsigned int	ncq_flags;	/* ncq_saw_flag_list bits */
};
293
294
5796d1c4 295#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
fbbb262d 296
2dcb407e 297static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
438ac6d5 298#ifdef CONFIG_PM
cdf56bcf 299static int nv_pci_device_resume(struct pci_dev *pdev);
438ac6d5 300#endif
cca3974e 301static void nv_ck804_host_stop(struct ata_host *host);
7d12e780
DH
302static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
303static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
304static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
82ef04fb
TH
305static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
306static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
1da177e4 307
39f87582
TH
308static void nv_nf2_freeze(struct ata_port *ap);
309static void nv_nf2_thaw(struct ata_port *ap);
3c324283
TH
310static int nv_nf2_hardreset(struct ata_link *link, unsigned int *class,
311 unsigned long deadline);
39f87582
TH
312static void nv_ck804_freeze(struct ata_port *ap);
313static void nv_ck804_thaw(struct ata_port *ap);
fbbb262d 314static int nv_adma_slave_config(struct scsi_device *sdev);
2dec7555 315static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
fbbb262d
RH
316static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
317static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
318static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
319static void nv_adma_irq_clear(struct ata_port *ap);
320static int nv_adma_port_start(struct ata_port *ap);
321static void nv_adma_port_stop(struct ata_port *ap);
438ac6d5 322#ifdef CONFIG_PM
cdf56bcf
RH
323static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
324static int nv_adma_port_resume(struct ata_port *ap);
438ac6d5 325#endif
53014e25
RH
326static void nv_adma_freeze(struct ata_port *ap);
327static void nv_adma_thaw(struct ata_port *ap);
fbbb262d
RH
328static void nv_adma_error_handler(struct ata_port *ap);
329static void nv_adma_host_stop(struct ata_host *host);
f5ecac2d 330static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
f2fb344b 331static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
39f87582 332
f140f0f1
KL
333static void nv_mcp55_thaw(struct ata_port *ap);
334static void nv_mcp55_freeze(struct ata_port *ap);
335static void nv_swncq_error_handler(struct ata_port *ap);
336static int nv_swncq_slave_config(struct scsi_device *sdev);
337static int nv_swncq_port_start(struct ata_port *ap);
338static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
339static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
340static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
341static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
342static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
343#ifdef CONFIG_PM
344static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
345static int nv_swncq_port_resume(struct ata_port *ap);
346#endif
347
1da177e4
LT
/* Index into nv_port_info[]; selected by the PCI device table. */
enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA,
	SWNCQ,
};
357
/* PCI IDs handled by this driver; driver_data selects the nv_host_type. */
static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ }	/* terminate list */
};
376
1da177e4
LT
/* PCI driver glue; suspend/resume only when power management is built in. */
static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};
387
/* SCSI host template for the plain BMDMA (non-NCQ) interfaces. */
static struct scsi_host_template nv_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};
391
/* SCSI host template for the ADMA interface (NCQ capable). */
static struct scsi_host_template nv_adma_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= NV_ADMA_MAX_CPBS,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
};
399
/* SCSI host template for the software-NCQ interface. */
static struct scsi_host_template nv_swncq_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= ATA_MAX_QUEUE,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= nv_swncq_slave_config,
};
407
/* Base operations shared by all flavors: BMDMA plus NV SCR access. */
static struct ata_port_operations nv_common_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
};
413
4c1eb90a
TH
/* OSDL bz11195 reports that link doesn't come online after hardreset
 * on generic nv's and there have been several other similar reports
 * on linux-ide.  Disable hardreset for generic nv's.
 */
static struct ata_port_operations nv_generic_ops = {
	.inherits		= &nv_common_ops,
	.hardreset		= ATA_OP_NULL,
};
422
3c324283
TH
/* OSDL bz3352 reports that nf2/3 controllers can't determine device
 * signature reliably.  Also, the following thread reports detection
 * failure on cold boot with the standard debouncing timing.
 *
 * http://thread.gmane.org/gmane.linux.ide/34098
 *
 * Debounce with hotplug timing and request follow-up SRST.
 */
static struct ata_port_operations nv_nf2_ops = {
	.inherits		= &nv_common_ops,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
	.hardreset		= nv_nf2_hardreset,
};
437
/* CK804 finally gets hardreset right */
static struct ata_port_operations nv_ck804_ops = {
	.inherits		= &nv_common_ops,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.host_stop		= nv_ck804_host_stop,
};
445
/* ADMA operations: build on CK804 but route command prep/issue and
 * interrupt handling through the ADMA engine. */
static struct ata_port_operations nv_adma_ops = {
	.inherits		= &nv_ck804_ops,

	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.sff_tf_read		= nv_adma_tf_read,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.sff_irq_clear		= nv_adma_irq_clear,

	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,

	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};
469
/* Software-NCQ operations for MCP51/MCP55; inherits the generic flavor
 * (hardreset disabled) and overrides the queued-command path. */
static struct ata_port_operations nv_swncq_ops = {
	.inherits		= &nv_generic_ops,

	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_swncq_qc_prep,
	.qc_issue		= nv_swncq_qc_issue,

	.freeze			= nv_mcp55_freeze,
	.thaw			= nv_mcp55_thaw,
	.error_handler		= nv_swncq_error_handler,

#ifdef CONFIG_PM
	.port_suspend		= nv_swncq_port_suspend,
	.port_resume		= nv_swncq_port_resume,
#endif
	.port_start		= nv_swncq_port_start,
};
487
95947193
TH
/* Bundles the IRQ handler and SCSI host template that go with each
 * nv_port_info entry; built in place with the NV_PI_PRIV() compound
 * literal below. */
struct nv_pi_priv {
	irq_handler_t			irq_handler;
	struct scsi_host_template	*sht;
};

#define NV_PI_PRIV(_irq_handler, _sht) \
	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
495
/* Port configuration per nv_host_type; indexed by the enum above. */
static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* nforce2/3 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
	},
	/* ck804 */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
	},
	/* ADMA */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
	},
	/* SWNCQ */
	{
		.flags	        = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_swncq_ops,
		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
	},
};
545
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* Module parameters: ADMA is off by default, SWNCQ is on by default. */
static int adma_enabled;
static int swncq_enabled = 1;
fbbb262d 554
2dec7555
RH
555static void nv_adma_register_mode(struct ata_port *ap)
556{
2dec7555 557 struct nv_adma_port_priv *pp = ap->private_data;
cdf56bcf 558 void __iomem *mmio = pp->ctl_block;
a2cfe81a
RH
559 u16 tmp, status;
560 int count = 0;
2dec7555
RH
561
562 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
563 return;
564
a2cfe81a 565 status = readw(mmio + NV_ADMA_STAT);
2dcb407e 566 while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
a2cfe81a
RH
567 ndelay(50);
568 status = readw(mmio + NV_ADMA_STAT);
569 count++;
570 }
2dcb407e 571 if (count == 20)
a2cfe81a
RH
572 ata_port_printk(ap, KERN_WARNING,
573 "timeout waiting for ADMA IDLE, stat=0x%hx\n",
574 status);
575
2dec7555
RH
576 tmp = readw(mmio + NV_ADMA_CTL);
577 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
578
a2cfe81a
RH
579 count = 0;
580 status = readw(mmio + NV_ADMA_STAT);
2dcb407e 581 while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
a2cfe81a
RH
582 ndelay(50);
583 status = readw(mmio + NV_ADMA_STAT);
584 count++;
585 }
2dcb407e 586 if (count == 20)
a2cfe81a
RH
587 ata_port_printk(ap, KERN_WARNING,
588 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
589 status);
590
2dec7555
RH
591 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
592}
593
594static void nv_adma_mode(struct ata_port *ap)
595{
2dec7555 596 struct nv_adma_port_priv *pp = ap->private_data;
cdf56bcf 597 void __iomem *mmio = pp->ctl_block;
a2cfe81a
RH
598 u16 tmp, status;
599 int count = 0;
2dec7555
RH
600
601 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
602 return;
f20b16ff 603
2dec7555
RH
604 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
605
606 tmp = readw(mmio + NV_ADMA_CTL);
607 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
608
a2cfe81a 609 status = readw(mmio + NV_ADMA_STAT);
2dcb407e 610 while (((status & NV_ADMA_STAT_LEGACY) ||
a2cfe81a
RH
611 !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
612 ndelay(50);
613 status = readw(mmio + NV_ADMA_STAT);
614 count++;
615 }
2dcb407e 616 if (count == 20)
a2cfe81a
RH
617 ata_port_printk(ap, KERN_WARNING,
618 "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
619 status);
620
2dec7555
RH
621 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
622}
623
fbbb262d
RH
/*
 * nv_adma_slave_config - SCSI slave_configure hook for ADMA ports.
 *
 * Chooses between the ADMA and legacy DMA constraints depending on
 * whether the attached device is ATAPI (ADMA does not support ATAPI,
 * so such devices go through the legacy engine with 32-bit DMA), then
 * updates PCI config register 20 and the shared PCI DMA mask/bounce
 * limits for both ports accordingly.  Returns the result of
 * ata_scsi_slave_config().
 */
static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct nv_adma_port_priv *port0, *port1;
	struct scsi_device *sdev0, *sdev1;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	unsigned long segment_boundary, flags;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	spin_lock_irqsave(ap->lock, flags);

	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	/* Pick the enable/power-bit mask for this port. */
	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	/* Only touch the config register if something actually changed. */
	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	port0 = ap->host->ports[0]->private_data;
	port1 = ap->host->ports[1]->private_data;
	sdev0 = ap->host->ports[0]->link.device[0].sdev;
	sdev1 = ap->host->ports[1]->link.device[0].sdev;
	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/** We have to set the DMA mask to 32-bit if either port is in
		    ATAPI mode, since they are on the same PCI device which is
		    used for DMA mapping. If we set the mask we also need to set
		    the bounce limit on both ports to ensure that the block
		    layer doesn't feed addresses that cause DMA mapping to
		    choke. If either SCSI device is not allocated yet, it's OK
		    since that port will discover its correct setting when it
		    does get allocated.
		    Note: Setting 32-bit mask should not fail. */
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       ATA_DMA_MASK);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       ATA_DMA_MASK);

		pci_set_dma_mask(pdev, ATA_DMA_MASK);
	} else {
		/** This shouldn't fail as it was set to this value before */
		pci_set_dma_mask(pdev, pp->adma_dma_mask);
		if (sdev0)
			blk_queue_bounce_limit(sdev0->request_queue,
					       pp->adma_dma_mask);
		if (sdev1)
			blk_queue_bounce_limit(sdev1->request_queue,
					       pp->adma_dma_mask);
	}

	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
	ata_port_printk(ap, KERN_INFO,
		"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		(unsigned long long)*ap->host->dev->dma_mask,
		segment_boundary, sg_tablesize);

	spin_unlock_irqrestore(ap->lock, flags);

	return rc;
}
733
2dec7555
RH
734static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
735{
736 struct nv_adma_port_priv *pp = qc->ap->private_data;
737 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
738}
739
f2fb344b
RH
/*
 * nv_adma_tf_read - taskfile read hook for ADMA ports.
 * Forces the port into register mode before reading the SFF taskfile
 * registers; see the comment below for why this is safe.
 */
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Other than when internal or pass-through commands are executed,
	   the only time this function will be called in ADMA mode will be
	   if a command fails. In the failure case we don't care about going
	   into register mode with ADMA commands pending, as the commands will
	   all shortly be aborted anyway. We assume that NCQ commands are not
	   issued via passthrough, which is the only way that switching into
	   ADMA mode could abort outstanding commands. */
	nv_adma_register_mode(ap);

	ata_sff_tf_read(ap, tf);
}
753
2dec7555 754static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
fbbb262d
RH
755{
756 unsigned int idx = 0;
757
2dcb407e 758 if (tf->flags & ATA_TFLAG_ISADDR) {
ac3d6b86
RH
759 if (tf->flags & ATA_TFLAG_LBA48) {
760 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
761 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
762 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
763 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
764 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
765 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
766 } else
767 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);
a84471fe 768
ac3d6b86
RH
769 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
770 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
771 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
772 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
fbbb262d 773 }
a84471fe 774
2dcb407e 775 if (tf->flags & ATA_TFLAG_DEVICE)
ac3d6b86 776 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
fbbb262d
RH
777
778 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
a84471fe 779
2dcb407e 780 while (idx < 12)
ac3d6b86 781 cpb[idx++] = cpu_to_le16(IGN);
fbbb262d
RH
782
783 return idx;
784}
785
/*
 * nv_adma_check_cpb - examine the response flags of one CPB.
 *
 * On error (or when @force_err is set) records an EH description,
 * sets the appropriate err_mask bits, and either freezes or aborts
 * the port.  On normal completion, completes the matching qc.
 *
 * Returns 1 if error handling was invoked (caller should stop
 * processing CPBs), 0 otherwise.
 */
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			/* Device-reported error: the device itself failed. */
			ata_ehi_push_desc(ehi, "ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, "CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			/* Controller-side CPB error: freeze for a full reset. */
			ata_ehi_push_desc(ehi, "CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ata_ehi_push_desc(ehi, "unknown");
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return 1;
	}

	if (likely(flags & NV_CPB_RESP_DONE)) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
		VPRINTK("CPB flags done, flags=0x%x\n", flags);
		if (likely(qc)) {
			DPRINTK("Completing qc from tag %d\n", cpb_num);
			ata_qc_complete(qc);
		} else {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			/* Notifier bits set without a command may indicate the drive
			   is misbehaving. Raise host state machine violation on this
			   condition. */
			ata_port_printk(ap, KERN_ERR,
					"notifier for tag %d with no cmd?\n",
					cpb_num);
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			ata_port_freeze(ap);
			return 1;
		}
	}
	return 0;
}
848
2dec7555
RH
849static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
850{
9af5c9c9 851 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
2dec7555
RH
852
853 /* freeze if hotplugged */
854 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
855 ata_port_freeze(ap);
856 return 1;
857 }
858
859 /* bail out if not our interrupt */
860 if (!(irq_stat & NV_INT_DEV))
861 return 0;
862
863 /* DEV interrupt w/ no active qc? */
864 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
9363c382 865 ata_sff_check_status(ap);
2dec7555
RH
866 return 1;
867 }
868
869 /* handle interrupt */
9363c382 870 return ata_sff_host_intr(ap, qc);
2dec7555
RH
871}
872
fbbb262d
RH
/* Top-level interrupt handler for CK804/MCP04 ports running in ADMA mode.
 * Walks both ports, dispatches legacy interrupts when a port is in ATAPI
 * or register mode, otherwise reads the ADMA notifier/status registers and
 * completes any finished CPBs.  Notifier bits collected per port are
 * written back at the end (both registers must be written together).
 */
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		notifier_clears[i] = 0;

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			struct nv_adma_port_priv *pp = ap->private_data;
			void __iomem *mmio = pp->ctl_block;
			u16 status;
			u32 gen_ctl;
			u32 notifier, notifier_error;

			/* if ADMA is disabled, use standard ata interrupt handler */
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				handled += nv_host_intr(ap, irq_stat);
				continue;
			}

			/* if in ATA register mode, check for standard interrupts */
			if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
				u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
					>> (NV_INT_PORT_SHIFT * i);
				if (ata_tag_valid(ap->link.active_tag))
					/** NV_INT_DEV indication seems unreliable at times
					    at least in ADMA mode. Force it on always when a
					    command is active, to prevent losing interrupts. */
					irq_stat |= NV_INT_DEV;
				handled += nv_host_intr(ap, irq_stat);
			}

			notifier = readl(mmio + NV_ADMA_NOTIFIER);
			notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			notifier_clears[i] = notifier | notifier_error;

			gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

			if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
			    !notifier_error)
				/* Nothing to do */
				continue;

			status = readw(mmio + NV_ADMA_STAT);

			/* Clear status. Ensure the controller sees the clearing before we start
			   looking at any of the CPB statuses, so that any CPB completions after
			   this point in the handler will raise another interrupt. */
			writew(status, mmio + NV_ADMA_STAT);
			readw(mmio + NV_ADMA_STAT); /* flush posted write */
			rmb();

			handled++; /* irq handled if we got here */

			/* freeze if hotplugged or controller error */
			if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
					       NV_ADMA_STAT_HOTUNPLUG |
					       NV_ADMA_STAT_TIMEOUT |
					       NV_ADMA_STAT_SERROR))) {
				struct ata_eh_info *ehi = &ap->link.eh_info;

				ata_ehi_clear_desc(ehi);
				__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
				if (status & NV_ADMA_STAT_TIMEOUT) {
					ehi->err_mask |= AC_ERR_SYSTEM;
					ata_ehi_push_desc(ehi, "timeout");
				} else if (status & NV_ADMA_STAT_HOTPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, "hotplug");
				} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
					ata_ehi_hotplugged(ehi);
					ata_ehi_push_desc(ehi, "hot unplug");
				} else if (status & NV_ADMA_STAT_SERROR) {
					/* let libata analyze SError and figure out the cause */
					ata_ehi_push_desc(ehi, "SError");
				} else
					ata_ehi_push_desc(ehi, "unknown");
				ata_port_freeze(ap);
				continue;
			}

			if (status & (NV_ADMA_STAT_DONE |
				      NV_ADMA_STAT_CPBERR |
				      NV_ADMA_STAT_CMD_COMPLETE)) {
				u32 check_commands = notifier_clears[i];
				int pos, error = 0;

				if (status & NV_ADMA_STAT_CPBERR) {
					/* Check all active commands */
					if (ata_tag_valid(ap->link.active_tag))
						check_commands = 1 <<
							ap->link.active_tag;
					else
						check_commands = ap->
							link.sactive;
				}

				/** Check CPBs for completed commands */
				while ((pos = ffs(check_commands)) && !error) {
					pos--;
					error = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
					check_commands &= ~(1 << pos);
				}
			}
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1001
53014e25
RH
/* Freeze an ADMA port: mask CK804 interrupts, then (unless the port is in
 * ATAPI/legacy mode) clear pending notifications and disable the ADMA
 * interrupt enables.  Counterpart of nv_adma_thaw().
 */
static void nv_adma_freeze(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_freeze(ap);

	/* legacy path: CK804 masking above is all that is needed */
	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* Disable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}
1023
/* Thaw an ADMA port: unmask CK804 interrupts and, when the port is in
 * ADMA mode, re-enable the ADMA and hotplug interrupt enables.
 */
static void nv_adma_thaw(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_thaw(ap);

	/* legacy path: nothing further to enable */
	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* Enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}
1041
fbbb262d
RH
1042static void nv_adma_irq_clear(struct ata_port *ap)
1043{
cdf56bcf
RH
1044 struct nv_adma_port_priv *pp = ap->private_data;
1045 void __iomem *mmio = pp->ctl_block;
53014e25 1046 u32 notifier_clears[2];
fbbb262d 1047
53014e25 1048 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
9363c382 1049 ata_sff_irq_clear(ap);
53014e25
RH
1050 return;
1051 }
1052
1053 /* clear any outstanding CK804 notifications */
2dcb407e 1054 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
53014e25 1055 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
fbbb262d 1056
53014e25
RH
1057 /* clear ADMA status */
1058 writew(0xffff, mmio + NV_ADMA_STAT);
a617c09f 1059
53014e25
RH
1060 /* clear notifiers - note both ports need to be written with
1061 something even though we are only clearing on one */
1062 if (ap->port_no == 0) {
1063 notifier_clears[0] = 0xFFFFFFFF;
1064 notifier_clears[1] = 0;
1065 } else {
1066 notifier_clears[0] = 0;
1067 notifier_clears[1] = 0xFFFFFFFF;
1068 }
1069 pp = ap->host->ports[0]->private_data;
1070 writel(notifier_clears[0], pp->notifier_clear_block);
1071 pp = ap->host->ports[1]->private_data;
1072 writel(notifier_clears[1], pp->notifier_clear_block);
fbbb262d
RH
1073}
1074
f5ecac2d 1075static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
fbbb262d 1076{
f5ecac2d 1077 struct nv_adma_port_priv *pp = qc->ap->private_data;
fbbb262d 1078
b447916e 1079 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
9363c382 1080 ata_sff_post_internal_cmd(qc);
fbbb262d
RH
1081}
1082
/* Allocate and initialize per-port ADMA state.
 * Sets a 32-bit DMA mask while the legacy PRD/pad buffers are allocated,
 * then raises it to 64-bit for the CPB/APRD table, allocates the DMA
 * chunk holding the CPB array followed by the s/g tables, programs the
 * CPB base registers, and resets the ADMA channel.
 * Returns 0 on success or a negative errno.
 */
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 tmp;

	VPRINTK("ENTER\n");

	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
	   pad buffers */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	/* devm/dmam allocations below are released automatically on
	   device teardown, so the error paths can simply return */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	/* Now that the legacy PRD and padding buffer are allocated we can
	   safely raise the DMA mask to allocate the CPB/APRD table.
	   These are allowed to fail since we store the value that ends up
	   being used to set as the bounce limit in slave_config later if
	   needed. */
	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	pp->adma_dma_mask = *dev->dma_mask;

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	writel(mem_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
	       NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	/* pulse CHANNEL_RESET to put the ADMA engine in a known state */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
1180
1181static void nv_adma_port_stop(struct ata_port *ap)
1182{
fbbb262d 1183 struct nv_adma_port_priv *pp = ap->private_data;
cdf56bcf 1184 void __iomem *mmio = pp->ctl_block;
fbbb262d
RH
1185
1186 VPRINTK("ENTER\n");
fbbb262d 1187 writew(0, mmio + NV_ADMA_CTL);
fbbb262d
RH
1188}
1189
438ac6d5 1190#ifdef CONFIG_PM
cdf56bcf
RH
/* Suspend hook: drop back to register mode (clears GO), zero the CPB
 * fetch count, then disable the channel entirely.  Always returns 0.
 */
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}
1207
/* Resume hook: reprogram the CPB base registers from the saved DMA
 * address, clear stale status, restore register mode, re-enable
 * interrupts and pulse a channel reset — mirroring the tail of
 * nv_adma_port_start().  Always returns 0.
 */
static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
	       NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	/* pulse CHANNEL_RESET to bring the engine back to a known state */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
438ac6d5 1241#endif
fbbb262d 1242
/* Point the port's taskfile register addresses at the ADMA register
 * window instead of the legacy BMDMA BAR.  Each shadow register sits at
 * a 4-byte stride within the port's ADMA window; the control/altstatus
 * register is at offset 0x20.
 */
static void nv_adma_setup_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &ap->ioaddr;

	VPRINTK("ENTER\n");

	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}
1266
/* Host-wide ADMA initialization: enable ADMA and posted-write-buffer
 * bits for both ports in PCI config space, then redirect each port's
 * taskfile addresses into the ADMA register window.  Always returns 0.
 */
static int nv_adma_host_init(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	unsigned int i;
	u32 tmp32;

	VPRINTK("ENTER\n");

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < host->n_ports; i++)
		nv_adma_setup_port(host->ports[i]);

	return 0;
}
1289
1290static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1291 struct scatterlist *sg,
1292 int idx,
1293 struct nv_adma_prd *aprd)
1294{
41949ed5 1295 u8 flags = 0;
fbbb262d
RH
1296 if (qc->tf.flags & ATA_TFLAG_WRITE)
1297 flags |= NV_APRD_WRITE;
1298 if (idx == qc->n_elem - 1)
1299 flags |= NV_APRD_END;
1300 else if (idx != 4)
1301 flags |= NV_APRD_CONT;
1302
1303 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
1304 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
2dec7555 1305 aprd->flags = flags;
41949ed5 1306 aprd->packet_len = 0;
fbbb262d
RH
1307}
1308
/* Build the ADMA PRD chain for a command.  The first five entries live
 * inline in the CPB; any further entries go into the per-tag slice of
 * the external APRD table, linked via cpb->next_aprd.
 */
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;
	unsigned int si;

	VPRINTK("ENTER\n");

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		/* entries 0-4 inline in the CPB, the rest in the APRD table */
		aprd = (si < 5) ? &cpb->aprd[si] :
			       &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
		nv_adma_fill_aprd(qc, sg, si, aprd);
	}
	if (si > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}
1328
382a6652
RH
1329static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1330{
1331 struct nv_adma_port_priv *pp = qc->ap->private_data;
1332
1333 /* ADMA engine can only be used for non-ATAPI DMA commands,
3f3debdb 1334 or interrupt-driven no-data commands. */
b447916e 1335 if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
3f3debdb 1336 (qc->tf.flags & ATA_TFLAG_POLLING))
382a6652
RH
1337 return 1;
1338
b447916e 1339 if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
382a6652
RH
1340 (qc->tf.protocol == ATA_PROT_NODATA))
1341 return 0;
1342
1343 return 1;
1344}
1345
fbbb262d
RH
/* Prepare a command: either fall back to SFF prep in register mode, or
 * build the CPB for ADMA issue.  The wmb()-bracketed stores enforce the
 * ordering the controller requires: the CPB is invalidated before being
 * rewritten, and CPB_VALID is only set once the contents are complete.
 */
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		/* DMA-mapped commands may only use register mode when the
		   port has been switched to ATAPI/legacy setup */
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		ata_sff_qc_prep(qc);
		return;
	}

	/* invalidate the CPB before rewriting it */
	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
	   until we are finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}
1391
/* Issue a prepared command.  Register-mode commands are handed to the
 * SFF issue path; ADMA commands are kicked off by writing the tag to
 * the APPEND register.  Rejects NCQ commands that request a result
 * taskfile, since reading the taskfile would force a mode switch and
 * abort outstanding commands.
 */
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	/* We can't handle result taskfile with NCQ commands, since
	   retrieving the taskfile switches us out of ADMA mode and would abort
	   existing commands. */
	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
		ata_dev_printk(qc->dev, KERN_ERR,
			"NCQ w/ RESULT_TF not allowed\n");
		return AC_ERR_SYSTEM;
	}

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		return ata_sff_qc_issue(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		   non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}
1437
/* Interrupt handler for the generic (pre-nForce2) flavor: walk every
 * port and hand any active non-polled command to the SFF interrupt
 * handler; otherwise read the status register to clear a possible
 * pending device interrupt.
 */
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_sff_host_intr(ap, qc);
			else
				// No request pending?  Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->sff_check_status(ap);
		}

	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
1470
cca3974e 1471static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
ada364e8
TH
1472{
1473 int i, handled = 0;
1474
cca3974e
JG
1475 for (i = 0; i < host->n_ports; i++) {
1476 struct ata_port *ap = host->ports[i];
ada364e8
TH
1477
1478 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1479 handled += nv_host_intr(ap, irq_stat);
1480
1481 irq_stat >>= NV_INT_PORT_SHIFT;
1482 }
1483
1484 return IRQ_RETVAL(handled);
1485}
1486
7d12e780 1487static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
ada364e8 1488{
cca3974e 1489 struct ata_host *host = dev_instance;
ada364e8
TH
1490 u8 irq_stat;
1491 irqreturn_t ret;
1492
cca3974e 1493 spin_lock(&host->lock);
0d5ff566 1494 irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
cca3974e
JG
1495 ret = nv_do_interrupt(host, irq_stat);
1496 spin_unlock(&host->lock);
ada364e8
TH
1497
1498 return ret;
1499}
1500
7d12e780 1501static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
ada364e8 1502{
cca3974e 1503 struct ata_host *host = dev_instance;
ada364e8
TH
1504 u8 irq_stat;
1505 irqreturn_t ret;
1506
cca3974e 1507 spin_lock(&host->lock);
0d5ff566 1508 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
cca3974e
JG
1509 ret = nv_do_interrupt(host, irq_stat);
1510 spin_unlock(&host->lock);
ada364e8
TH
1511
1512 return ret;
1513}
1514
82ef04fb 1515static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1da177e4 1516{
1da177e4 1517 if (sc_reg > SCR_CONTROL)
da3dbb17 1518 return -EINVAL;
1da177e4 1519
82ef04fb 1520 *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
da3dbb17 1521 return 0;
1da177e4
LT
1522}
1523
82ef04fb 1524static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1da177e4 1525{
1da177e4 1526 if (sc_reg > SCR_CONTROL)
da3dbb17 1527 return -EINVAL;
1da177e4 1528
82ef04fb 1529 iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
da3dbb17 1530 return 0;
1da177e4
LT
1531}
1532
39f87582
TH
1533static void nv_nf2_freeze(struct ata_port *ap)
1534{
0d5ff566 1535 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
39f87582
TH
1536 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1537 u8 mask;
1538
0d5ff566 1539 mask = ioread8(scr_addr + NV_INT_ENABLE);
39f87582 1540 mask &= ~(NV_INT_ALL << shift);
0d5ff566 1541 iowrite8(mask, scr_addr + NV_INT_ENABLE);
39f87582
TH
1542}
1543
1544static void nv_nf2_thaw(struct ata_port *ap)
1545{
0d5ff566 1546 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
39f87582
TH
1547 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1548 u8 mask;
1549
0d5ff566 1550 iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
39f87582 1551
0d5ff566 1552 mask = ioread8(scr_addr + NV_INT_ENABLE);
39f87582 1553 mask |= (NV_INT_MASK << shift);
0d5ff566 1554 iowrite8(mask, scr_addr + NV_INT_ENABLE);
39f87582
TH
1555}
1556
3c324283
TH
1557static int nv_nf2_hardreset(struct ata_link *link, unsigned int *class,
1558 unsigned long deadline)
1559{
1560 bool online;
1561 int rc;
1562
1563 rc = sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1564 &online, NULL);
1565 return online ? -EAGAIN : rc;
1566}
1567
39f87582
TH
1568static void nv_ck804_freeze(struct ata_port *ap)
1569{
0d5ff566 1570 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
39f87582
TH
1571 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1572 u8 mask;
1573
1574 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1575 mask &= ~(NV_INT_ALL << shift);
1576 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1577}
1578
1579static void nv_ck804_thaw(struct ata_port *ap)
1580{
0d5ff566 1581 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
39f87582
TH
1582 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1583 u8 mask;
1584
1585 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1586
1587 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1588 mask |= (NV_INT_MASK << shift);
1589 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1590}
1591
f140f0f1
KL
1592static void nv_mcp55_freeze(struct ata_port *ap)
1593{
1594 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1595 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1596 u32 mask;
1597
1598 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1599
1600 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1601 mask &= ~(NV_INT_ALL_MCP55 << shift);
1602 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
9363c382 1603 ata_sff_freeze(ap);
f140f0f1
KL
1604}
1605
1606static void nv_mcp55_thaw(struct ata_port *ap)
1607{
1608 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1609 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1610 u32 mask;
1611
1612 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1613
1614 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1615 mask |= (NV_INT_MASK_MCP55 << shift);
1616 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
9363c382 1617 ata_sff_thaw(ap);
f140f0f1
KL
1618}
1619
fbbb262d
RH
/* Error handler for ADMA ports.  If the port is still in ADMA mode,
 * dump engine/CPB diagnostics for any active commands, drop back to
 * register mode, invalidate all CPBs and reset the channel, then hand
 * off to the standard SFF error handler.
 */
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
			/* snapshot engine state for the diagnostic dump */
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_printk(ap, KERN_ERR,
				"EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			/* dump only the CPBs belonging to active commands */
			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
				    ap->link.sactive & (1 << i))
					ata_port_printk(ap, KERN_ERR,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from
		   being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_sff_error_handler(ap);
}
1675
f140f0f1
KL
1676static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1677{
1678 struct nv_swncq_port_priv *pp = ap->private_data;
1679 struct defer_queue *dq = &pp->defer_queue;
1680
1681 /* queue is full */
1682 WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1683 dq->defer_bits |= (1 << qc->tag);
1684 dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1685}
1686
1687static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1688{
1689 struct nv_swncq_port_priv *pp = ap->private_data;
1690 struct defer_queue *dq = &pp->defer_queue;
1691 unsigned int tag;
1692
1693 if (dq->head == dq->tail) /* null queue */
1694 return NULL;
1695
1696 tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1697 dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1698 WARN_ON(!(dq->defer_bits & (1 << tag)));
1699 dq->defer_bits &= ~(1 << tag);
1700
1701 return ata_qc_from_tag(ap, tag);
1702}
1703
1704static void nv_swncq_fis_reinit(struct ata_port *ap)
1705{
1706 struct nv_swncq_port_priv *pp = ap->private_data;
1707
1708 pp->dhfis_bits = 0;
1709 pp->dmafis_bits = 0;
1710 pp->sdbfis_bits = 0;
1711 pp->ncq_flags = 0;
1712}
1713
1714static void nv_swncq_pp_reinit(struct ata_port *ap)
1715{
1716 struct nv_swncq_port_priv *pp = ap->private_data;
1717 struct defer_queue *dq = &pp->defer_queue;
1718
1719 dq->head = 0;
1720 dq->tail = 0;
1721 dq->defer_bits = 0;
1722 pp->qc_active = 0;
1723 pp->last_issue_tag = ATA_TAG_POISON;
1724 nv_swncq_fis_reinit(ap);
1725}
1726
1727static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1728{
1729 struct nv_swncq_port_priv *pp = ap->private_data;
1730
1731 writew(fis, pp->irq_block);
1732}
1733
1734static void __ata_bmdma_stop(struct ata_port *ap)
1735{
1736 struct ata_queued_cmd qc;
1737
1738 qc.ap = ap;
1739 ata_bmdma_stop(&qc);
1740}
1741
1742static void nv_swncq_ncq_stop(struct ata_port *ap)
1743{
1744 struct nv_swncq_port_priv *pp = ap->private_data;
1745 unsigned int i;
1746 u32 sactive;
1747 u32 done_mask;
1748
1749 ata_port_printk(ap, KERN_ERR,
1750 "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
1751 ap->qc_active, ap->link.sactive);
1752 ata_port_printk(ap, KERN_ERR,
1753 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
1754 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1755 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1756 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1757
1758 ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
5682ed33 1759 ap->ops->sff_check_status(ap),
f140f0f1
KL
1760 ioread8(ap->ioaddr.error_addr));
1761
1762 sactive = readl(pp->sactive_block);
1763 done_mask = pp->qc_active ^ sactive;
1764
1765 ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sacitve\n");
1766 for (i = 0; i < ATA_MAX_QUEUE; i++) {
1767 u8 err = 0;
1768 if (pp->qc_active & (1 << i))
1769 err = 0;
1770 else if (done_mask & (1 << i))
1771 err = 1;
1772 else
1773 continue;
1774
1775 ata_port_printk(ap, KERN_ERR,
1776 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1777 (pp->dhfis_bits >> i) & 0x1,
1778 (pp->dmafis_bits >> i) & 0x1,
1779 (pp->sdbfis_bits >> i) & 0x1,
1780 (sactive >> i) & 0x1,
1781 (err ? "error! tag doesn't exit" : " "));
1782 }
1783
1784 nv_swncq_pp_reinit(ap);
5682ed33 1785 ap->ops->sff_irq_clear(ap);
f140f0f1
KL
1786 __ata_bmdma_stop(ap);
1787 nv_swncq_irq_clear(ap, 0xffff);
1788}
1789
/* SWNCQ error handler: if NCQ commands are in flight, tear down the
 * SWNCQ machinery and force a reset, then run the standard SFF handler.
 */
static void nv_swncq_error_handler(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->link.eh_context;

	if (ap->link.sactive) {
		nv_swncq_ncq_stop(ap);
		ehc->i.action |= ATA_EH_RESET;
	}

	ata_sff_error_handler(ap);
}
1801
1802#ifdef CONFIG_PM
/* Suspend hook: ack and disable MCP55 interrupts, then clear the SWNCQ
 * enable bits for both channels.  Always returns 0.
 */
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* disable irq */
	writel(0, mmio + NV_INT_ENABLE_MCP55);

	/* disable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
	writel(tmp, mmio + NV_CTL_MCP55);

	return 0;
}
1821
/* Resume hook: ack stale interrupts, restore the interrupt enable mask
 * and the SWNCQ enable bits for both channels.  Always returns 0.
 */
static int nv_swncq_port_resume(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* enable irq */
	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	return 0;
}
1839#endif
1840
/* Host-wide SWNCQ initialization: disable the ECO 398 workaround bit in
 * PCI config space, enable SWNCQ on both channels, unmask interrupts,
 * and clear any stale port interrupt status.
 */
static void nv_swncq_host_init(struct ata_host *host)
{
	u32 tmp;
	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable  ECO 398 */
	pci_read_config_byte(pdev, 0x7f, &regval);
	regval &= ~(1 << 7);
	pci_write_config_byte(pdev, 0x7f, regval);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	VPRINTK("HOST_CTL:0x%X\n", tmp);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	/* enable irq intr */
	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* clear port irq */
	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}
1866
/* SCSI slave-configure hook for SWNCQ ports.
 *
 * After generic libata slave configuration, drop the queue depth to 1
 * (effectively disabling NCQ) for Maxtor drives attached to MCP51, or
 * to MCP55 silicon at revision <= 0xa2 -- combinations this driver
 * treats as unreliable with NCQ.
 */
static int nv_swncq_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *dev;
	int rc;
	u8 rev;
	u8 check_maxtor = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	rc = ata_scsi_slave_config(sdev);
	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	dev = &ap->link.device[sdev->id];
	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
		return rc;

	/* if MCP51 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
	    pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
		check_maxtor = 1;

	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
	    pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
		pci_read_config_byte(pdev, 0x8, &rev);	/* PCI revision ID */
		if (rev <= 0xa2)
			check_maxtor = 1;
	}

	if (!check_maxtor)
		return rc;

	/* read the IDENTIFY product string to match the vendor */
	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	if (strncmp(model_num, "Maxtor", 6) == 0) {
		ata_scsi_change_queue_depth(sdev, 1);
		ata_dev_printk(dev, KERN_NOTICE,
			"Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
	}

	return rc;
}
1912
/* Per-port init for SWNCQ: allocate private state and one PRD table
 * per possible queue tag, and cache the MMIO addresses the interrupt
 * path reads frequently.  All allocations are device-managed, so no
 * explicit teardown is required.
 */
static int nv_swncq_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct nv_swncq_port_priv *pp;
	int rc;

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* one PRD table per tag so queued commands keep separate DMA state */
	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
				      &pp->prd_dma, GFP_KERNEL);
	if (!pp->prd)
		return -ENOMEM;
	memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);

	ap->private_data = pp;
	/* shortcuts used on every NCQ issue/completion */
	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;

	return 0;
}
1941
1942static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1943{
1944 if (qc->tf.protocol != ATA_PROT_NCQ) {
9363c382 1945 ata_sff_qc_prep(qc);
f140f0f1
KL
1946 return;
1947 }
1948
1949 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1950 return;
1951
1952 nv_swncq_fill_sg(qc);
1953}
1954
/* Build the PRD (physical region descriptor) table for an NCQ command.
 *
 * Each tag owns its own table at pp->prd + ATA_MAX_PRD * tag.  Per
 * BMDMA convention, scatterlist segments are split so no PRD entry
 * crosses a 64KB boundary (a length of 0x10000 is masked to 0, which
 * BMDMA interprets as 64KB).  The final entry is flagged ATA_PRD_EOT.
 */
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_prd *prd;
	unsigned int si, idx;

	prd = pp->prd + ATA_MAX_PRD * qc->tag;

	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		addr = (u32)sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			/* clamp this entry so it ends at the next 64KB boundary */
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[idx].addr = cpu_to_le32(addr);
			prd[idx].flags_len = cpu_to_le32(len & 0xffff);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark end-of-table on the last entry written */
	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
1990
/* Issue a single NCQ command to the drive: mark its tag active in
 * SActive and in the driver's bookkeeping bitmaps, then load and fire
 * the taskfile.  A NULL qc is a no-op.  Always returns 0.
 */
static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
					  struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	if (qc == NULL)
		return 0;

	DPRINTK("Enter\n");

	/* set this tag in SActive before the command hits the wire */
	writel((1 << qc->tag), pp->sactive_block);
	pp->last_issue_tag = qc->tag;
	/* neither a D2H register FIS nor a DMA-setup FIS seen yet */
	pp->dhfis_bits &= ~(1 << qc->tag);
	pp->dmafis_bits &= ~(1 << qc->tag);
	pp->qc_active |= (0x1 << qc->tag);

	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
	ap->ops->sff_exec_command(ap, &qc->tf);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}
2014
2015static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2016{
2017 struct ata_port *ap = qc->ap;
2018 struct nv_swncq_port_priv *pp = ap->private_data;
2019
2020 if (qc->tf.protocol != ATA_PROT_NCQ)
9363c382 2021 return ata_sff_qc_issue(qc);
f140f0f1
KL
2022
2023 DPRINTK("Enter\n");
2024
2025 if (!pp->qc_active)
2026 nv_swncq_issue_atacmd(ap, qc);
2027 else
2028 nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */
2029
2030 return 0;
2031}
2032
/* Handle a hotplug/hot-unplug interrupt: record the event for EH,
 * clear SError, and freeze the port so error handling takes over.
 */
static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
{
	u32 serror;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	sata_scr_read(&ap->link, SCR_ERROR, &serror);
	sata_scr_write(&ap->link, SCR_ERROR, serror);

	/* analyze @irq_stat */
	if (fis & NV_SWNCQ_IRQ_ADDED)
		ata_ehi_push_desc(ehi, "hot plug");
	else if (fis & NV_SWNCQ_IRQ_REMOVED)
		ata_ehi_push_desc(ehi, "hot unplug");

	ata_ehi_hotplugged(ehi);

	/* okay, let's hand over to EH */
	ehi->serror |= serror;

	ata_port_freeze(ap);
}
2057
/* Process a Set Device Bits FIS: complete every command whose tag the
 * drive cleared in SActive, then decide whether a command has to be
 * reissued or pulled from the defer queue.
 *
 * Returns the number of completed commands, or -EINVAL (with EH
 * action/err_mask set) on a BMDMA error or an illegal SActive
 * transition.
 */
static int nv_swncq_sdbfis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 sactive;
	int nr_done = 0;
	u32 done_mask;
	int i;
	u8 host_stat;
	u8 lack_dhfis = 0;

	host_stat = ap->ops->bmdma_status(ap);
	if (unlikely(host_stat & ATA_DMA_ERR)) {
		/* error when transferring data to/from memory */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
		ehi->err_mask |= AC_ERR_HOST_BUS;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}

	ap->ops->sff_irq_clear(ap);
	__ata_bmdma_stop(ap);

	/* tags we consider active but the drive has cleared are finished */
	sactive = readl(pp->sactive_block);
	done_mask = pp->qc_active ^ sactive;

	if (unlikely(done_mask & sactive)) {
		/* a tag is set in SActive that we never issued */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition"
				  "(%08x->%08x)", pp->qc_active, sactive);
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}
	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		if (!(done_mask & (1 << i)))
			continue;

		qc = ata_qc_from_tag(ap, i);
		if (qc) {
			/* complete the qc and retire its bookkeeping bits */
			ata_qc_complete(qc);
			pp->qc_active &= ~(1 << i);
			pp->dhfis_bits &= ~(1 << i);
			pp->dmafis_bits &= ~(1 << i);
			pp->sdbfis_bits |= (1 << i);
			nr_done++;
		}
	}

	if (!ap->qc_active) {
		/* nothing left in flight: reset the per-port NCQ state */
		DPRINTK("over\n");
		nv_swncq_pp_reinit(ap);
		return nr_done;
	}

	/* some active command has already seen its D2H FIS; nothing to do */
	if (pp->qc_active & pp->dhfis_bits)
		return nr_done;

	if ((pp->ncq_flags & ncq_saw_backout) ||
	    (pp->qc_active ^ pp->dhfis_bits))
		/* if the controller cannot get a device-to-host register FIS,
		 * the driver needs to reissue the command.
		 */
		lack_dhfis = 1;

	DPRINTK("id 0x%x QC: qc_active 0x%x,"
		"SWNCQ:qc_active 0x%X defer_bits %X "
		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
		ap->print_id, ap->qc_active, pp->qc_active,
		pp->defer_queue.defer_bits, pp->dhfis_bits,
		pp->dmafis_bits, pp->last_issue_tag);

	nv_swncq_fis_reinit(ap);

	if (lack_dhfis) {
		/* reissue the last command; its D2H FIS never arrived */
		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
		nv_swncq_issue_atacmd(ap, qc);
		return nr_done;
	}

	if (pp->defer_queue.defer_bits) {
		/* send deferral queue command */
		qc = nv_swncq_qc_from_dq(ap);
		WARN_ON(qc == NULL);
		nv_swncq_issue_atacmd(ap, qc);
	}

	return nr_done;
}
2149
2150static inline u32 nv_swncq_tag(struct ata_port *ap)
2151{
2152 struct nv_swncq_port_priv *pp = ap->private_data;
2153 u32 tag;
2154
2155 tag = readb(pp->tag_block) >> 2;
2156 return (tag & 0x1f);
2157}
2158
/* Program and start the BMDMA engine for the tag the controller
 * reported via its DMA-setup FIS.  Returns 1 if DMA was started,
 * 0 if the tag no longer maps to an active command.
 */
static int nv_swncq_dmafis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned int rw;
	u8 dmactl;
	u32 tag;
	struct nv_swncq_port_priv *pp = ap->private_data;

	__ata_bmdma_stop(ap);
	tag = nv_swncq_tag(ap);

	DPRINTK("dma setup tag 0x%x\n", tag);
	qc = ata_qc_from_tag(ap, tag);

	if (unlikely(!qc))
		return 0;

	rw = qc->tf.flags & ATA_TFLAG_WRITE;

	/* load PRD table addr. */
	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~ATA_DMA_WR;
	if (!rw)
		/* ATA_DMA_WR means "DMA writes to memory", i.e. device read */
		dmactl |= ATA_DMA_WR;

	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	return 1;
}
2192
/* Per-port interrupt handling while NCQ commands are active.
 *
 * @fis is this port's slice of the MCP55 interrupt status word.
 * Dispatches, in order: hotplug, device error, SDB-FIS completion,
 * D2H register FIS acknowledgement, and DMA-setup FIS events.
 */
static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 serror;
	u8 ata_stat;
	int rc = 0;

	/* sample device status before acking the interrupt */
	ata_stat = ap->ops->sff_check_status(ap);
	nv_swncq_irq_clear(ap, fis);
	if (!fis)
		return;

	/* EH owns a frozen port; don't touch hardware state */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
		nv_swncq_hotplug(ap, fis);
		return;
	}

	if (!pp->qc_active)
		return;

	/* read-then-write-back clears SError */
	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
		return;
	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);

	if (ata_stat & ATA_ERR) {
		/* device reported an error: freeze and let EH recover */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
		ehi->err_mask |= AC_ERR_DEV;
		ehi->serror |= serror;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
		return;
	}

	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
		/* If the IRQ is backout, driver must issue
		 * the new command again some time later.
		 */
		pp->ncq_flags |= ncq_saw_backout;
	}

	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
		pp->ncq_flags |= ncq_saw_sdb;
		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
			ap->print_id, pp->qc_active, pp->dhfis_bits,
			pp->dmafis_bits, readl(pp->sactive_block));
		rc = nv_swncq_sdbfis(ap);
		if (rc < 0)
			goto irq_error;
	}

	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
		/* The interrupt indicates the new command
		 * was transmitted correctly to the drive.
		 */
		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
		pp->ncq_flags |= ncq_saw_d2h;
		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
			/* a D2H FIS after SDB/backout is out of protocol order */
			ata_ehi_push_desc(ehi, "illegal fis transaction");
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			goto irq_error;
		}

		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
		    !(pp->ncq_flags & ncq_saw_dmas)) {
			/* no DMA phase pending: maybe issue the next command */
			ata_stat = ap->ops->sff_check_status(ap);
			if (ata_stat & ATA_BUSY)
				goto irq_exit;

			if (pp->defer_queue.defer_bits) {
				DPRINTK("send next command\n");
				qc = nv_swncq_qc_from_dq(ap);
				nv_swncq_issue_atacmd(ap, qc);
			}
		}
	}

	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
		/* program the dma controller with appropriate PRD buffers
		 * and start the DMA transfer for requested command.
		 */
		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
		pp->ncq_flags |= ncq_saw_dmas;
		rc = nv_swncq_dmafis(ap);
	}

irq_exit:
	return;
irq_error:
	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
	ata_port_freeze(ap);
	return;
}
2293
/* Top-level shared interrupt handler for SWNCQ-capable controllers.
 * Reads the global MCP55 status word once under the host lock and
 * hands each port its NV_INT_PORT_SHIFT_MCP55-wide slice.
 */
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;
	u32 irq_stat;

	spin_lock_irqsave(&host->lock, flags);

	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			if (ap->link.sactive) {
				/* NCQ commands in flight: SWNCQ path */
				nv_swncq_host_interrupt(ap, (u16)irq_stat);
				handled = 1;
			} else {
				if (irq_stat)	/* reserve Hotplug */
					nv_swncq_irq_clear(ap, 0xfff0);

				handled += nv_host_intr(ap, (u8)irq_stat);
			}
		}
		/* shift down to the next port's status bits */
		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
2327
5796d1c4 2328static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1da177e4 2329{
5796d1c4 2330 static int printed_version;
1626aeb8 2331 const struct ata_port_info *ppi[] = { NULL, NULL };
95947193 2332 struct nv_pi_priv *ipriv;
9a829ccf 2333 struct ata_host *host;
cdf56bcf 2334 struct nv_host_priv *hpriv;
1da177e4
LT
2335 int rc;
2336 u32 bar;
0d5ff566 2337 void __iomem *base;
fbbb262d 2338 unsigned long type = ent->driver_data;
1da177e4
LT
2339
2340 // Make sure this is a SATA controller by counting the number of bars
2341 // (NVIDIA SATA controllers will always have six bars). Otherwise,
2342 // it's an IDE controller and we ignore it.
5796d1c4 2343 for (bar = 0; bar < 6; bar++)
1da177e4
LT
2344 if (pci_resource_start(pdev, bar) == 0)
2345 return -ENODEV;
2346
cdf56bcf 2347 if (!printed_version++)
a9524a76 2348 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1da177e4 2349
24dc5f33 2350 rc = pcim_enable_device(pdev);
1da177e4 2351 if (rc)
24dc5f33 2352 return rc;
1da177e4 2353
9a829ccf 2354 /* determine type and allocate host */
f140f0f1 2355 if (type == CK804 && adma_enabled) {
fbbb262d
RH
2356 dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
2357 type = ADMA;
fbbb262d
RH
2358 }
2359
360737a9
JG
2360 if (type == SWNCQ) {
2361 if (swncq_enabled)
2362 dev_printk(KERN_NOTICE, &pdev->dev,
2363 "Using SWNCQ mode\n");
2364 else
2365 type = GENERIC;
2366 }
2367
1626aeb8 2368 ppi[0] = &nv_port_info[type];
95947193 2369 ipriv = ppi[0]->private_data;
9363c382 2370 rc = ata_pci_sff_prepare_host(pdev, ppi, &host);
9a829ccf
TH
2371 if (rc)
2372 return rc;
1da177e4 2373
24dc5f33 2374 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
cdf56bcf 2375 if (!hpriv)
24dc5f33 2376 return -ENOMEM;
9a829ccf
TH
2377 hpriv->type = type;
2378 host->private_data = hpriv;
cdf56bcf 2379
9a829ccf
TH
2380 /* request and iomap NV_MMIO_BAR */
2381 rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2382 if (rc)
2383 return rc;
1da177e4 2384
9a829ccf
TH
2385 /* configure SCR access */
2386 base = host->iomap[NV_MMIO_BAR];
2387 host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2388 host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
1da177e4 2389
ada364e8 2390 /* enable SATA space for CK804 */
fbbb262d 2391 if (type >= CK804) {
ada364e8
TH
2392 u8 regval;
2393
2394 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2395 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2396 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2397 }
2398
9a829ccf 2399 /* init ADMA */
fbbb262d 2400 if (type == ADMA) {
9a829ccf 2401 rc = nv_adma_host_init(host);
fbbb262d 2402 if (rc)
24dc5f33 2403 return rc;
360737a9 2404 } else if (type == SWNCQ)
f140f0f1 2405 nv_swncq_host_init(host);
fbbb262d 2406
9a829ccf 2407 pci_set_master(pdev);
95947193
TH
2408 return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
2409 IRQF_SHARED, ipriv->sht);
1da177e4
LT
2410}
2411
438ac6d5 2412#ifdef CONFIG_PM
cdf56bcf
RH
/* PCI resume: after the generic libata PCI resume, redo the
 * chip-specific configuration that is lost across a real suspend
 * (SATA register space enable for CK804+, per-port ADMA enables),
 * then resume the ata host.
 */
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if (hpriv->type >= CK804) {
			u8 regval;

			/* re-enable the SATA register space */
			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if (hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;
			/* enable/disable ADMA on the ports appropriately:
			 * a port in ATAPI setup must run with ADMA off */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
					  NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
					  NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}
438ac6d5 2460#endif
cdf56bcf 2461
cca3974e 2462static void nv_ck804_host_stop(struct ata_host *host)
ada364e8 2463{
cca3974e 2464 struct pci_dev *pdev = to_pci_dev(host->dev);
ada364e8
TH
2465 u8 regval;
2466
2467 /* disable SATA space for CK804 */
2468 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2469 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2470 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
ada364e8
TH
2471}
2472
fbbb262d
RH
2473static void nv_adma_host_stop(struct ata_host *host)
2474{
2475 struct pci_dev *pdev = to_pci_dev(host->dev);
fbbb262d
RH
2476 u32 tmp32;
2477
fbbb262d
RH
2478 /* disable ADMA on the ports */
2479 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2480 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2481 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2482 NV_MCP_SATA_CFG_20_PORT1_EN |
2483 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2484
2485 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2486
2487 nv_ck804_host_stop(host);
2488}
2489
1da177e4
LT
/* Module entry point: register the PCI driver. */
static int __init nv_init(void)
{
	return pci_register_driver(&nv_pci_driver);
}
2494
/* Module exit point: unregister the PCI driver. */
static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}
2499
module_init(nv_init);
module_exit(nv_exit);
/* "adma" and "swncq" select the optional operating modes at load time;
 * 0444 makes them visible but read-only in sysfs. */
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
module_param_named(swncq, swncq_enabled, bool, 0444);
MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");