/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple of workarounds (one related to PCI-X)
  are still needed.

  2) Convert to LibATA's new EH.  Required for hotplug, NCQ, and sane
  probing/error handling in general.  MUST HAVE.

  3) Add hotplug support (easy, once new-EH support appears)

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  7) Test and verify 3.0 Gbps support

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  12) Verify that the 7042 is fully supported.  I only have a 6042.

*/


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"0.81"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_USE_Q_DEPTH		= ATA_DEF_QUEUE,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

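	/* For reference, the arithmetic behind MV_PORT_PRIV_DMA_SZ,
	 * derived from the constants above (illustrative only):
	 *   CRQB queue:  32 slots * 32B   = 1024B (1KB-aligned)
	 *   CRPB queue:  32 slots *  8B   =  256B (256B-aligned)
	 *   ePRD table: 176 entries * 16B = 2816B (16B-aligned)
	 *   total                         = 4096B (one 4KB page per port)
	 */
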
	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
				   ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,			/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),		/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),		/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),		/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),
	EDMA_ERR_PRD_PAR	= (1 << 1),
	EDMA_ERR_DEV		= (1 << 2),
	EDMA_ERR_DEV_DCON	= (1 << 3),
	EDMA_ERR_DEV_CON	= (1 << 4),
	EDMA_ERR_SERR		= (1 << 5),
	EDMA_ERR_SELF_DIS	= (1 << 7),
	EDMA_ERR_BIST_ASYNC	= (1 << 8),
	EDMA_ERR_CRBQ_PAR	= (1 << 9),
	EDMA_ERR_CRPB_PAR	= (1 << 10),
	EDMA_ERR_INTRL_PAR	= (1 << 11),
	EDMA_ERR_IORDY		= (1 << 12),
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
	EDMA_ERR_TRANS_PROTO	= (1 << 31),
	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
				   EDMA_ERR_LNK_DATA_RX |
				   EDMA_ERR_LNK_DATA_TX |
				   EDMA_ERR_TRANS_PROTO),

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,
	EDMA_EN			= (1 << 0),
	EDMA_DS			= (1 << 1),
	ATA_RST			= (1 << 2),

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_50XX		= (1 << 6),
	MV_HP_GEN_IIE		= (1 << 7),

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),
	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
};

#define IS_50XX(hpriv)	((hpriv)->hp_flags & MV_HP_50XX)
#define IS_60XX(hpriv)	(((hpriv)->hp_flags & MV_HP_50XX) == 0)
#define IS_GEN_I(hpriv)		IS_50XX(hpriv)
#define IS_GEN_II(hpriv)	IS_60XX(hpriv)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
	MV_DMA_BOUNDARY		= 0xffffffffU,

	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

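/* How the EDMA queue pointer registers are packed, reading the masks and
 * shifts above (illustrative): the request IN pointer keeps the low queue
 * base address in bits 31:10 (EDMA_REQ_Q_BASE_LO_MASK) and the 32-entry
 * producer index in bits 9:5 (EDMA_REQ_Q_PTR_SHIFT plus
 * MV_MAX_Q_DEPTH_MASK); the response queue likewise uses bits 31:8 for
 * its base and bits 7:3 for its index, which works because the queues
 * are 1KB- and 256B-aligned respectively.
 */
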
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};

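/* A rough generation map, inferred from the tables below rather than
 * stated anywhere authoritative: the 50xx chips are the Gen I parts
 * (mv5_ops), 604x/608x are Gen II (mv6_ops), and 6042/7042 are Gen IIE
 * (mv_iie_ops); the IS_GEN_* macros above encode the same split.
 */
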
/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;
	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv;
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
};

static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_eng_timeout(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_stop_and_reset(struct ata_port *ap);

static struct scsi_host_template mv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= MV_USE_Q_DEPTH,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,
	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,
	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,
	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				   MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	/* add Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	{ }			/* terminate list */
};

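/* A note on the ID table above: PCI_VDEVICE() supplies the vendor and
 * device IDs and wildcards the subvendor/subdevice fields, so the bare
 * chip_xxx value that follows each entry is stored in .driver_data.
 * Presumably mv_init_one() (declared earlier, defined later in the
 * file) uses that value to index mv_port_info[].
 */
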
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */


/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

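/* Worked example of the address math above (illustrative, derived from
 * the constants): host port 6 is hard port 2 (6 & 3) on HC 1 (6 >> 2),
 * so mv_port_base(base, 6) resolves to
 * base + 0x20000 + 1*0x10000 + 0x2000 + 2*0x2000 == base + 0x36000.
 */
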
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}

/**
 * mv_start_dma - Enable eDMA engine
 * @base: port base address
 * @pp: port private data
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
{
	if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}

/**
 * mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i;

	if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
		/* Disable EDMA if active.  The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(EDMA_EN & reg)) {
			break;
		}
		udelay(100);
	}

	if (EDMA_EN & reg) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		/* FIXME: Consider doing a reset here to recover */
	}
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

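/* Illustrative mapping, assuming libata's usual SCR numbering
 * (SCR_STATUS = 0, SCR_ERROR = 1, SCR_CONTROL = 2): SStatus reads at
 * 0x300, SError at 0x304, SControl at 0x308, and SActive at its own
 * 0x350 slot; anything else yields the 0xffffffffU "no such register"
 * cookie that the callers below check for.
 */
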
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs)
		return readl(mv_ap_base(ap) + ofs);
	else
		return (u32) ofs;
}

static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs)
		writelfl(val, mv_ap_base(ap) + ofs);
}

static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable equeue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	mv_edma_cfg(hpriv, port_mmio);

	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crqb_dma & 0xffffffff,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crpb_dma & 0xffffffff,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}

/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(&ap->host->lock, flags);
	mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);
}

/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int n_sg = 0;
	struct scatterlist *sg;
	struct mv_sg *mv_sg;

	mv_sg = pp->sg_tbl;
	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
		mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
		mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);

		if (ata_sg_is_last(sg, qc))
			mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

		mv_sg++;
		n_sg++;
	}

	return n_sg;
}

static inline unsigned mv_inc_q_index(unsigned index)
{
	return (index + 1) & MV_MAX_Q_DEPTH_MASK;
}

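/* The AND with MV_MAX_Q_DEPTH_MASK (31) makes this a cheap ring-buffer
 * increment: indices walk 0, 1, ..., 31 and then wrap back to 0, which
 * only works because MV_MAX_Q_DEPTH is a power of two.
 */
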
static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}

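/* Shape of the resulting little-endian CRQB command word, read straight
 * off the constants above (illustrative): bits 7:0 carry the register
 * data byte, the small ATA register number used here sits at bits 11:8
 * (addr << CRQB_CMD_ADDR_SHIFT), CRQB_CMD_CS sets bit 12, and
 * CRQB_CMD_LAST (bit 15) marks the final word of the sequence.
 */
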
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		   >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		   >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(qc->ap);
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned in_index;
	u32 in_ptr;

	if (ATA_PROT_DMA != qc->tf.protocol) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_dma(qc->ap);
		return ata_qc_issue_prot(qc);
	}

	in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	in_index = mv_inc_q_index(in_index);	/* now incr producer index */

	mv_start_dma(port_mmio, pp);

	/* and write the request in pointer to kick the EDMA to life */
	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
	in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

/**
 * mv_get_crpb_status - get status from most recently completed cmd
 * @ap: ATA channel to manipulate
 *
 * This routine is for use when the port is in DMA mode, when it
 * will be using the CRPB (command response block) method of
 * returning command completion information.  We check indices
 * are good, grab status, and bump the response consumer index to
 * prove that we're up to date.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 mv_get_crpb_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	unsigned out_index;
	u32 out_ptr;
	u8 ata_status;

	out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
	out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	ata_status = le16_to_cpu(pp->crpb[out_index].flags)
					>> CRPB_FLAG_STATUS_SHIFT;

	/* increment our consumer index... */
	out_index = mv_inc_q_index(out_index);

	/* and, until we do NCQ, there should only be 1 CRPB waiting */
	WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
		>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	/* write out our inc'd consumer index so EDMA knows we're caught up */
	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
	out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Return ATA status register for completed CRPB */
	return ata_status;
}

/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @reset_allowed: bool: 0 == don't trigger a reset from here
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, int reset_allowed)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, serr = 0;

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (EDMA_ERR_SERR & edma_err_cause) {
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}
	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
		struct mv_port_priv *pp	= ap->private_data;
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	}
	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
		"SERR: 0x%08x\n", ap->print_id, edma_err_cause, serr);

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* check for fatal here and recover if needed */
	if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
		mv_stop_and_reset(ap);
}

/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	struct ata_queued_cmd *qc;
	u32 hc_irq_cause;
	int shift, port, port0, hard_port, handled;
	unsigned int err_mask;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (hc_irq_cause)
		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		u8 ata_status = 0;
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;

		hard_port = mv_hardport_from_port(port); /* range 0..3 */
		handled = 0;	/* ensure ata_status is set if handled++ */

		/* Note that DEV_IRQ might happen spuriously during EDMA,
		 * and should be ignored in such cases.
		 * The cause of this is still under investigation.
		 */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			/* EDMA: check for response queue interrupt */
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
				ata_status = mv_get_crpb_status(ap);
				handled = 1;
			}
		} else {
			/* PIO: check for device (drive) interrupt */
			if ((DEV_IRQ << hard_port) & hc_irq_cause) {
				ata_status = readb(ap->ioaddr.status_addr);
				handled = 1;
				/* ignore spurious intr if drive still BUSY */
				if (ata_status & ATA_BUSY) {
					ata_status = 0;
					handled = 0;
				}
			}
		}

		if (ap && (ap->flags & ATA_FLAG_DISABLED))
			continue;

		err_mask = ac_err_mask(ata_status);

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
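		/* Example, reading the HC main cause layout above: port 0
		 * uses bits 0/1 for err/done and port 3 bits 6/7, while
		 * port 4 lands on bits 9/10 because bit 8 is HC0's
		 * coalescing bit (PORTS_0_3_COAL_DONE) and must be
		 * stepped over.
		 */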
		if ((PORT0_ERR << shift) & relevant) {
			mv_err_intr(ap, 1);
			err_mask |= AC_ERR_OTHER;
			handled = 1;
		}

		if (handled) {
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
				VPRINTK("port %u IRQ found for qc, "
					"ata_status 0x%x\n", port, ata_status);
				/* mark qc status appropriately */
				if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
					qc->err_mask |= err_mask;
					ata_qc_complete(qc);
				}
			}
		}
	}
	VPRINTK("EXIT\n");
}

/**
 * mv_interrupt - top-level interrupt handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 * reported here.
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv;
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat))
		return IRQ_NONE;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled++;
		}
	}

	hpriv = host->private_data;
	if (IS_60XX(hpriv)) {
		/* deal with the interrupt coalescing bits */
		if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
		}
	}

	if (PCI_ERR & irq_stat) {
		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
		       readl(mmio + PCI_IRQ_CAUSE_OFS));

		DPRINTK("All regs @ PCI error\n");
		mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
		handled++;
	}
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

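/* Illustrative: the per-port PHY blocks sit at 0x100, 0x200, 0x300 and
 * 0x400 past the owning HC's base, so for host port 5 (HC 1, hard
 * port 1) this returns mv_hc_base_from_port(mmio, 5) + 0x200.
 */
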
1535static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1536{
1537 unsigned int ofs;
1538
1539 switch (sc_reg_in) {
1540 case SCR_STATUS:
1541 case SCR_ERROR:
1542 case SCR_CONTROL:
1543 ofs = sc_reg_in * sizeof(u32);
1544 break;
1545 default:
1546 ofs = 0xffffffffU;
1547 break;
1548 }
1549 return ofs;
1550}
1551
1552static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
1553{
0d5ff566
TH
1554 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1555 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1556 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1557
1558 if (ofs != 0xffffffffU)
0d5ff566 1559 return readl(addr + ofs);
c9d39130
JG
1560 else
1561 return (u32) ofs;
1562}
1563
1564static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1565{
0d5ff566
TH
1566 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1567 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1568 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1569
1570 if (ofs != 0xffffffffU)
0d5ff566 1571 writelfl(val, addr + ofs);
c9d39130
JG
1572}
1573
522479fb
JG
1574static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1575{
522479fb
JG
1576 int early_5080;
1577
44c10138 1578 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
522479fb
JG
1579
1580 if (!early_5080) {
1581 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1582 tmp |= (1 << 0);
1583 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1584 }
1585
1586 mv_reset_pci_bus(pdev, mmio);
1587}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}

#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}

/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @mmio: base address of the HBA
 *
 * This routine only applies to 6xxx parts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}

static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_60XX(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_50XX(hpriv))
		mdelay(1);
}

static void mv_stop_and_reset(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	__mv_phy_reset(ap, 0);
}

static inline void __msleep(unsigned int msec, int can_sleep)
{
	if (can_sleep)
		msleep(msec);
	else
		mdelay(msec);
}

/**
 * __mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 *
 * Part of this is taken from __sata_phy_reset and modified to
 * not sleep since this routine gets called from interrupt level.
 *
 * LOCKING:
 * Inherited from caller.  This is coded to be safe to call at
 * interrupt level, i.e. it does not sleep.
 */
static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct ata_taskfile tf;
	struct ata_device *dev = &ap->device[0];
	unsigned long timeout;
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
	__msleep(1, can_sleep);

	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
	__msleep(20, can_sleep);

	timeout = jiffies + msecs_to_jiffies(200);
	do {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		__msleep(1, can_sleep);
	} while (time_before(jiffies, timeout));

	/* work around errata */
	if (IS_60XX(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	if (ata_port_online(ap)) {
		ata_port_probe(ap);
	} else {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		ata_port_printk(ap, KERN_INFO,
				"no device found (phy stat %08x)\n", sstatus);
		ata_port_disable(ap);
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		__msleep(500, can_sleep);
		if (retry-- <= 0)
			break;
	}

	tf.lbah = readb(ap->ioaddr.lbah_addr);
	tf.lbam = readb(ap->ioaddr.lbam_addr);
	tf.lbal = readb(ap->ioaddr.lbal_addr);
	tf.nsect = readb(ap->ioaddr.nsect_addr);

	dev->class = ata_dev_classify(&tf);
	if (!ata_dev_enabled(dev)) {
		VPRINTK("Port disabled post-sig: No device present.\n");
		ata_port_disable(ap);
	}

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	VPRINTK("EXIT\n");
}

static void mv_phy_reset(struct ata_port *ap)
{
	__mv_phy_reset(ap, 1);
}

/**
 * mv_eng_timeout - Routine called by libata when SCSI times out I/O
 * @ap: ATA channel to manipulate
 *
 * Intent is to clear all pending error conditions, reset the
 * chip/bus, fail the command, and move on.
 *
 * LOCKING:
 * This routine holds the host lock while failing the command.
 */
static void mv_eng_timeout(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	struct ata_queued_cmd *qc;
	unsigned long flags;

	ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
	DPRINTK("All regs @ start of eng_timeout\n");
	mv_dump_all_regs(mmio, ap->port_no, to_pci_dev(ap->host->dev));

	qc = ata_qc_from_tag(ap, ap->active_tag);
	printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
	       mmio, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);

	spin_lock_irqsave(&ap->host->lock, flags);
	mv_err_intr(ap, 0);
	mv_stop_and_reset(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
	if (qc->flags & ATA_QCFLAG_ACTIVE) {
		qc->err_mask |= AC_ERR_TIMEOUT;
		ata_eh_qc_complete(qc);
	}
}

/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}

static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;

		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;

	return 0;
}

/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_60XX(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&host->ports[port]->ioaddr, port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

	if (IS_50XX(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}

/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @host: ATA host to print info about
 *
 * FIXME: complete this.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Read the PCI sub-class code so we can report whether the
	 * chip presents itself as a SCSI or a RAID class device.
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 * mv_init_one - handle a positive probe of a Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 &mv_sht);
}

static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
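
/*
 * Usage example (hypothetical command line): "modprobe sata_mv msi=1"
 * enables MSI at load time.  The 0444 permission exposes the current
 * value read-only at /sys/module/sata_mv/parameters/msi.
 */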

module_init(mv_init);
module_exit(mv_exit);